aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-s390
diff options
context:
space:
mode:
Diffstat (limited to 'include/asm-s390')
-rw-r--r--include/asm-s390/Kbuild12
-rw-r--r--include/asm-s390/appldata.h90
-rw-r--r--include/asm-s390/atomic.h120
-rw-r--r--include/asm-s390/bitops.h626
-rw-r--r--include/asm-s390/byteorder.h50
-rw-r--r--include/asm-s390/checksum.h176
-rw-r--r--include/asm-s390/cio.h7
-rw-r--r--include/asm-s390/div64.h48
-rw-r--r--include/asm-s390/dma.h2
-rw-r--r--include/asm-s390/ebcdic.h20
-rw-r--r--include/asm-s390/futex.h87
-rw-r--r--include/asm-s390/io.h16
-rw-r--r--include/asm-s390/irq.h3
-rw-r--r--include/asm-s390/irqflags.h110
-rw-r--r--include/asm-s390/kdebug.h59
-rw-r--r--include/asm-s390/kprobes.h114
-rw-r--r--include/asm-s390/lowcore.h16
-rw-r--r--include/asm-s390/monwriter.h33
-rw-r--r--include/asm-s390/page.h111
-rw-r--r--include/asm-s390/percpu.h20
-rw-r--r--include/asm-s390/pgalloc.h67
-rw-r--r--include/asm-s390/pgtable.h218
-rw-r--r--include/asm-s390/processor.h149
-rw-r--r--include/asm-s390/ptrace.h2
-rw-r--r--include/asm-s390/rwsem.h238
-rw-r--r--include/asm-s390/semaphore.h16
-rw-r--r--include/asm-s390/setup.h67
-rw-r--r--include/asm-s390/sfp-machine.h64
-rw-r--r--include/asm-s390/sigp.h65
-rw-r--r--include/asm-s390/smp.h4
-rw-r--r--include/asm-s390/spinlock.h62
-rw-r--r--include/asm-s390/spinlock_types.h6
-rw-r--r--include/asm-s390/string.h56
-rw-r--r--include/asm-s390/system.h342
-rw-r--r--include/asm-s390/timex.h19
-rw-r--r--include/asm-s390/tlbflush.h32
-rw-r--r--include/asm-s390/uaccess.h181
-rw-r--r--include/asm-s390/unistd.h432
-rw-r--r--include/asm-s390/z90crypt.h212
-rw-r--r--include/asm-s390/zcrypt.h285
40 files changed, 2175 insertions, 2062 deletions
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
index ed8955f49e47..e92b429d2be1 100644
--- a/include/asm-s390/Kbuild
+++ b/include/asm-s390/Kbuild
@@ -1,4 +1,12 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3unifdef-y += cmb.h debug.h 3header-y += dasd.h
4header-y += dasd.h qeth.h tape390.h ucontext.h vtoc.h z90crypt.h 4header-y += monwriter.h
5header-y += qeth.h
6header-y += tape390.h
7header-y += ucontext.h
8header-y += vtoc.h
9header-y += zcrypt.h
10
11unifdef-y += cmb.h
12unifdef-y += debug.h
diff --git a/include/asm-s390/appldata.h b/include/asm-s390/appldata.h
new file mode 100644
index 000000000000..79283dac8281
--- /dev/null
+++ b/include/asm-s390/appldata.h
@@ -0,0 +1,90 @@
1/*
2 * include/asm-s390/appldata.h
3 *
4 * Copyright (C) IBM Corp. 2006
5 *
6 * Author(s): Melissa Howland <melissah@us.ibm.com>
7 */
8
9#ifndef _ASM_S390_APPLDATA_H
10#define _ASM_S390_APPLDATA_H
11
12#include <asm/io.h>
13
14#ifndef CONFIG_64BIT
15
16#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
17#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
18#define APPLDATA_GEN_EVENT_REC 0x02
19#define APPLDATA_START_CONFIG_REC 0x03
20
21/*
22 * Parameter list for DIAGNOSE X'DC'
23 */
24struct appldata_parameter_list {
25 u16 diag; /* The DIAGNOSE code X'00DC' */
26 u8 function; /* The function code for the DIAGNOSE */
27 u8 parlist_length; /* Length of the parameter list */
28 u32 product_id_addr; /* Address of the 16-byte product ID */
29 u16 reserved;
30 u16 buffer_length; /* Length of the application data buffer */
31 u32 buffer_addr; /* Address of the application data buffer */
32} __attribute__ ((packed));
33
34#else /* CONFIG_64BIT */
35
36#define APPLDATA_START_INTERVAL_REC 0x80
37#define APPLDATA_STOP_REC 0x81
38#define APPLDATA_GEN_EVENT_REC 0x82
39#define APPLDATA_START_CONFIG_REC 0x83
40
41/*
42 * Parameter list for DIAGNOSE X'DC'
43 */
44struct appldata_parameter_list {
45 u16 diag;
46 u8 function;
47 u8 parlist_length;
48 u32 unused01;
49 u16 reserved;
50 u16 buffer_length;
51 u32 unused02;
52 u64 product_id_addr;
53 u64 buffer_addr;
54} __attribute__ ((packed));
55
56#endif /* CONFIG_64BIT */
57
58struct appldata_product_id {
59 char prod_nr[7]; /* product number */
60 u16 prod_fn; /* product function */
61 u8 record_nr; /* record number */
62 u16 version_nr; /* version */
63 u16 release_nr; /* release */
64 u16 mod_lvl; /* modification level */
65} __attribute__ ((packed));
66
67static inline int appldata_asm(struct appldata_product_id *id,
68 unsigned short fn, void *buffer,
69 unsigned short length)
70{
71 struct appldata_parameter_list parm_list;
72 int ry;
73
74 if (!MACHINE_IS_VM)
75 return -ENOSYS;
76 parm_list.diag = 0xdc;
77 parm_list.function = fn;
78 parm_list.parlist_length = sizeof(parm_list);
79 parm_list.buffer_length = length;
80 parm_list.product_id_addr = (unsigned long) id;
81 parm_list.buffer_addr = virt_to_phys(buffer);
82 asm volatile(
83 " diag %1,%0,0xdc"
84 : "=d" (ry)
85 : "d" (&parm_list), "m" (parm_list), "m" (*id)
86 : "cc");
87 return ry;
88}
89
90#endif /* _ASM_S390_APPLDATA_H */
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 399bf02894dd..af20c7462485 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -30,20 +30,43 @@ typedef struct {
30 30
31#ifdef __KERNEL__ 31#ifdef __KERNEL__
32 32
33#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
34
33#define __CS_LOOP(ptr, op_val, op_string) ({ \ 35#define __CS_LOOP(ptr, op_val, op_string) ({ \
34 typeof(ptr->counter) old_val, new_val; \ 36 typeof(ptr->counter) old_val, new_val; \
35 __asm__ __volatile__(" l %0,0(%3)\n" \ 37 asm volatile( \
36 "0: lr %1,%0\n" \ 38 " l %0,%2\n" \
37 op_string " %1,%4\n" \ 39 "0: lr %1,%0\n" \
38 " cs %0,%1,0(%3)\n" \ 40 op_string " %1,%3\n" \
39 " jl 0b" \ 41 " cs %0,%1,%2\n" \
40 : "=&d" (old_val), "=&d" (new_val), \ 42 " jl 0b" \
41 "=m" (((atomic_t *)(ptr))->counter) \ 43 : "=&d" (old_val), "=&d" (new_val), \
42 : "a" (ptr), "d" (op_val), \ 44 "=Q" (((atomic_t *)(ptr))->counter) \
43 "m" (((atomic_t *)(ptr))->counter) \ 45 : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
44 : "cc", "memory" ); \ 46 : "cc", "memory"); \
45 new_val; \ 47 new_val; \
46}) 48})
49
50#else /* __GNUC__ */
51
52#define __CS_LOOP(ptr, op_val, op_string) ({ \
53 typeof(ptr->counter) old_val, new_val; \
54 asm volatile( \
55 " l %0,0(%3)\n" \
56 "0: lr %1,%0\n" \
57 op_string " %1,%4\n" \
58 " cs %0,%1,0(%3)\n" \
59 " jl 0b" \
60 : "=&d" (old_val), "=&d" (new_val), \
61 "=m" (((atomic_t *)(ptr))->counter) \
62 : "a" (ptr), "d" (op_val), \
63 "m" (((atomic_t *)(ptr))->counter) \
64 : "cc", "memory"); \
65 new_val; \
66})
67
68#endif /* __GNUC__ */
69
47#define atomic_read(v) ((v)->counter) 70#define atomic_read(v) ((v)->counter)
48#define atomic_set(v,i) (((v)->counter) = (i)) 71#define atomic_set(v,i) (((v)->counter) = (i))
49 72
@@ -81,10 +104,19 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
81 104
82static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) 105static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
83{ 106{
84 __asm__ __volatile__(" cs %0,%3,0(%2)\n" 107#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
85 : "+d" (old), "=m" (v->counter) 108 asm volatile(
86 : "a" (v), "d" (new), "m" (v->counter) 109 " cs %0,%2,%1"
87 : "cc", "memory" ); 110 : "+d" (old), "=Q" (v->counter)
111 : "d" (new), "Q" (v->counter)
112 : "cc", "memory");
113#else /* __GNUC__ */
114 asm volatile(
115 " cs %0,%3,0(%2)"
116 : "+d" (old), "=m" (v->counter)
117 : "a" (v), "d" (new), "m" (v->counter)
118 : "cc", "memory");
119#endif /* __GNUC__ */
88 return old; 120 return old;
89} 121}
90 122
@@ -113,20 +145,43 @@ typedef struct {
113} __attribute__ ((aligned (8))) atomic64_t; 145} __attribute__ ((aligned (8))) atomic64_t;
114#define ATOMIC64_INIT(i) { (i) } 146#define ATOMIC64_INIT(i) { (i) }
115 147
148#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
149
116#define __CSG_LOOP(ptr, op_val, op_string) ({ \ 150#define __CSG_LOOP(ptr, op_val, op_string) ({ \
117 typeof(ptr->counter) old_val, new_val; \ 151 typeof(ptr->counter) old_val, new_val; \
118 __asm__ __volatile__(" lg %0,0(%3)\n" \ 152 asm volatile( \
119 "0: lgr %1,%0\n" \ 153 " lg %0,%2\n" \
120 op_string " %1,%4\n" \ 154 "0: lgr %1,%0\n" \
121 " csg %0,%1,0(%3)\n" \ 155 op_string " %1,%3\n" \
122 " jl 0b" \ 156 " csg %0,%1,%2\n" \
123 : "=&d" (old_val), "=&d" (new_val), \ 157 " jl 0b" \
124 "=m" (((atomic_t *)(ptr))->counter) \ 158 : "=&d" (old_val), "=&d" (new_val), \
125 : "a" (ptr), "d" (op_val), \ 159 "=Q" (((atomic_t *)(ptr))->counter) \
126 "m" (((atomic_t *)(ptr))->counter) \ 160 : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
127 : "cc", "memory" ); \ 161 : "cc", "memory" ); \
128 new_val; \ 162 new_val; \
129}) 163})
164
165#else /* __GNUC__ */
166
167#define __CSG_LOOP(ptr, op_val, op_string) ({ \
168 typeof(ptr->counter) old_val, new_val; \
169 asm volatile( \
170 " lg %0,0(%3)\n" \
171 "0: lgr %1,%0\n" \
172 op_string " %1,%4\n" \
173 " csg %0,%1,0(%3)\n" \
174 " jl 0b" \
175 : "=&d" (old_val), "=&d" (new_val), \
176 "=m" (((atomic_t *)(ptr))->counter) \
177 : "a" (ptr), "d" (op_val), \
178 "m" (((atomic_t *)(ptr))->counter) \
179 : "cc", "memory" ); \
180 new_val; \
181})
182
183#endif /* __GNUC__ */
184
130#define atomic64_read(v) ((v)->counter) 185#define atomic64_read(v) ((v)->counter)
131#define atomic64_set(v,i) (((v)->counter) = (i)) 186#define atomic64_set(v,i) (((v)->counter) = (i))
132 187
@@ -163,10 +218,19 @@ static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
163static __inline__ long long atomic64_cmpxchg(atomic64_t *v, 218static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
164 long long old, long long new) 219 long long old, long long new)
165{ 220{
166 __asm__ __volatile__(" csg %0,%3,0(%2)\n" 221#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
167 : "+d" (old), "=m" (v->counter) 222 asm volatile(
168 : "a" (v), "d" (new), "m" (v->counter) 223 " csg %0,%2,%1"
169 : "cc", "memory" ); 224 : "+d" (old), "=Q" (v->counter)
225 : "d" (new), "Q" (v->counter)
226 : "cc", "memory");
227#else /* __GNUC__ */
228 asm volatile(
229 " csg %0,%3,0(%2)"
230 : "+d" (old), "=m" (v->counter)
231 : "a" (v), "d" (new), "m" (v->counter)
232 : "cc", "memory");
233#endif /* __GNUC__ */
170 return old; 234 return old;
171} 235}
172 236
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 0ddcdba79e4a..f79c9b792af1 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -67,16 +67,35 @@ extern const char _sb_findmap[];
67#define __BITOPS_AND "nr" 67#define __BITOPS_AND "nr"
68#define __BITOPS_XOR "xr" 68#define __BITOPS_XOR "xr"
69 69
70#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 70#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
71 __asm__ __volatile__(" l %0,0(%4)\n" \ 71
72 "0: lr %1,%0\n" \ 72#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
73 __op_string " %1,%3\n" \ 73 asm volatile( \
74 " cs %0,%1,0(%4)\n" \ 74 " l %0,%2\n" \
75 " jl 0b" \ 75 "0: lr %1,%0\n" \
76 : "=&d" (__old), "=&d" (__new), \ 76 __op_string " %1,%3\n" \
77 "=m" (*(unsigned long *) __addr) \ 77 " cs %0,%1,%2\n" \
78 : "d" (__val), "a" (__addr), \ 78 " jl 0b" \
79 "m" (*(unsigned long *) __addr) : "cc" ); 79 : "=&d" (__old), "=&d" (__new), \
80 "=Q" (*(unsigned long *) __addr) \
81 : "d" (__val), "Q" (*(unsigned long *) __addr) \
82 : "cc");
83
84#else /* __GNUC__ */
85
86#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
87 asm volatile( \
88 " l %0,0(%4)\n" \
89 "0: lr %1,%0\n" \
90 __op_string " %1,%3\n" \
91 " cs %0,%1,0(%4)\n" \
92 " jl 0b" \
93 : "=&d" (__old), "=&d" (__new), \
94 "=m" (*(unsigned long *) __addr) \
95 : "d" (__val), "a" (__addr), \
96 "m" (*(unsigned long *) __addr) : "cc");
97
98#endif /* __GNUC__ */
80 99
81#else /* __s390x__ */ 100#else /* __s390x__ */
82 101
@@ -86,21 +105,41 @@ extern const char _sb_findmap[];
86#define __BITOPS_AND "ngr" 105#define __BITOPS_AND "ngr"
87#define __BITOPS_XOR "xgr" 106#define __BITOPS_XOR "xgr"
88 107
89#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 108#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
90 __asm__ __volatile__(" lg %0,0(%4)\n" \ 109
91 "0: lgr %1,%0\n" \ 110#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
92 __op_string " %1,%3\n" \ 111 asm volatile( \
93 " csg %0,%1,0(%4)\n" \ 112 " lg %0,%2\n" \
94 " jl 0b" \ 113 "0: lgr %1,%0\n" \
95 : "=&d" (__old), "=&d" (__new), \ 114 __op_string " %1,%3\n" \
96 "=m" (*(unsigned long *) __addr) \ 115 " csg %0,%1,%2\n" \
97 : "d" (__val), "a" (__addr), \ 116 " jl 0b" \
98 "m" (*(unsigned long *) __addr) : "cc" ); 117 : "=&d" (__old), "=&d" (__new), \
118 "=Q" (*(unsigned long *) __addr) \
119 : "d" (__val), "Q" (*(unsigned long *) __addr) \
120 : "cc");
121
122#else /* __GNUC__ */
123
124#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
125 asm volatile( \
126 " lg %0,0(%4)\n" \
127 "0: lgr %1,%0\n" \
128 __op_string " %1,%3\n" \
129 " csg %0,%1,0(%4)\n" \
130 " jl 0b" \
131 : "=&d" (__old), "=&d" (__new), \
132 "=m" (*(unsigned long *) __addr) \
133 : "d" (__val), "a" (__addr), \
134 "m" (*(unsigned long *) __addr) : "cc");
135
136
137#endif /* __GNUC__ */
99 138
100#endif /* __s390x__ */ 139#endif /* __s390x__ */
101 140
102#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 141#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
103#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" ) 142#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
104 143
105#ifdef CONFIG_SMP 144#ifdef CONFIG_SMP
106/* 145/*
@@ -217,10 +256,10 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
217 unsigned long addr; 256 unsigned long addr;
218 257
219 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 258 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
220 asm volatile("oc 0(1,%1),0(%2)" 259 asm volatile(
221 : "=m" (*(char *) addr) 260 " oc 0(1,%1),0(%2)"
222 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 261 : "=m" (*(char *) addr) : "a" (addr),
223 "m" (*(char *) addr) : "cc" ); 262 "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
224} 263}
225 264
226static inline void 265static inline void
@@ -229,40 +268,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
229 unsigned long addr; 268 unsigned long addr;
230 269
231 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 270 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
232 switch (nr&7) { 271 *(unsigned char *) addr |= 1 << (nr & 7);
233 case 0:
234 asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
235 : "a" (addr), "m" (*(char *) addr) : "cc" );
236 break;
237 case 1:
238 asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
239 : "a" (addr), "m" (*(char *) addr) : "cc" );
240 break;
241 case 2:
242 asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
243 : "a" (addr), "m" (*(char *) addr) : "cc" );
244 break;
245 case 3:
246 asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
247 : "a" (addr), "m" (*(char *) addr) : "cc" );
248 break;
249 case 4:
250 asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
251 : "a" (addr), "m" (*(char *) addr) : "cc" );
252 break;
253 case 5:
254 asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
255 : "a" (addr), "m" (*(char *) addr) : "cc" );
256 break;
257 case 6:
258 asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
259 : "a" (addr), "m" (*(char *) addr) : "cc" );
260 break;
261 case 7:
262 asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
263 : "a" (addr), "m" (*(char *) addr) : "cc" );
264 break;
265 }
266} 272}
267 273
268#define set_bit_simple(nr,addr) \ 274#define set_bit_simple(nr,addr) \
@@ -279,10 +285,10 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
279 unsigned long addr; 285 unsigned long addr;
280 286
281 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 287 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
282 asm volatile("nc 0(1,%1),0(%2)" 288 asm volatile(
283 : "=m" (*(char *) addr) 289 " nc 0(1,%1),0(%2)"
284 : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 290 : "=m" (*(char *) addr) : "a" (addr),
285 "m" (*(char *) addr) : "cc" ); 291 "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc");
286} 292}
287 293
288static inline void 294static inline void
@@ -291,40 +297,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
291 unsigned long addr; 297 unsigned long addr;
292 298
293 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 299 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
294 switch (nr&7) { 300 *(unsigned char *) addr &= ~(1 << (nr & 7));
295 case 0:
296 asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
297 : "a" (addr), "m" (*(char *) addr) : "cc" );
298 break;
299 case 1:
300 asm volatile ("ni 0(%1),0xFD": "=m" (*(char *) addr)
301 : "a" (addr), "m" (*(char *) addr) : "cc" );
302 break;
303 case 2:
304 asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
305 : "a" (addr), "m" (*(char *) addr) : "cc" );
306 break;
307 case 3:
308 asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
309 : "a" (addr), "m" (*(char *) addr) : "cc" );
310 break;
311 case 4:
312 asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
313 : "a" (addr), "m" (*(char *) addr) : "cc" );
314 break;
315 case 5:
316 asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
317 : "a" (addr), "m" (*(char *) addr) : "cc" );
318 break;
319 case 6:
320 asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
321 : "a" (addr), "m" (*(char *) addr) : "cc" );
322 break;
323 case 7:
324 asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
325 : "a" (addr), "m" (*(char *) addr) : "cc" );
326 break;
327 }
328} 301}
329 302
330#define clear_bit_simple(nr,addr) \ 303#define clear_bit_simple(nr,addr) \
@@ -340,10 +313,10 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
340 unsigned long addr; 313 unsigned long addr;
341 314
342 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 315 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
343 asm volatile("xc 0(1,%1),0(%2)" 316 asm volatile(
344 : "=m" (*(char *) addr) 317 " xc 0(1,%1),0(%2)"
345 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 318 : "=m" (*(char *) addr) : "a" (addr),
346 "m" (*(char *) addr) : "cc" ); 319 "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
347} 320}
348 321
349static inline void 322static inline void
@@ -352,40 +325,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
352 unsigned long addr; 325 unsigned long addr;
353 326
354 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 327 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
355 switch (nr&7) { 328 *(unsigned char *) addr ^= 1 << (nr & 7);
356 case 0:
357 asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
358 : "a" (addr), "m" (*(char *) addr) : "cc" );
359 break;
360 case 1:
361 asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
362 : "a" (addr), "m" (*(char *) addr) : "cc" );
363 break;
364 case 2:
365 asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
366 : "a" (addr), "m" (*(char *) addr) : "cc" );
367 break;
368 case 3:
369 asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
370 : "a" (addr), "m" (*(char *) addr) : "cc" );
371 break;
372 case 4:
373 asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
374 : "a" (addr), "m" (*(char *) addr) : "cc" );
375 break;
376 case 5:
377 asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
378 : "a" (addr), "m" (*(char *) addr) : "cc" );
379 break;
380 case 6:
381 asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
382 : "a" (addr), "m" (*(char *) addr) : "cc" );
383 break;
384 case 7:
385 asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
386 : "a" (addr), "m" (*(char *) addr) : "cc" );
387 break;
388 }
389} 329}
390 330
391#define change_bit_simple(nr,addr) \ 331#define change_bit_simple(nr,addr) \
@@ -404,10 +344,11 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
404 344
405 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 345 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
406 ch = *(unsigned char *) addr; 346 ch = *(unsigned char *) addr;
407 asm volatile("oc 0(1,%1),0(%2)" 347 asm volatile(
408 : "=m" (*(char *) addr) 348 " oc 0(1,%1),0(%2)"
409 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 349 : "=m" (*(char *) addr)
410 "m" (*(char *) addr) : "cc", "memory" ); 350 : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
351 "m" (*(char *) addr) : "cc", "memory");
411 return (ch >> (nr & 7)) & 1; 352 return (ch >> (nr & 7)) & 1;
412} 353}
413#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) 354#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
@@ -423,10 +364,11 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
423 364
424 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 365 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
425 ch = *(unsigned char *) addr; 366 ch = *(unsigned char *) addr;
426 asm volatile("nc 0(1,%1),0(%2)" 367 asm volatile(
427 : "=m" (*(char *) addr) 368 " nc 0(1,%1),0(%2)"
428 : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 369 : "=m" (*(char *) addr)
429 "m" (*(char *) addr) : "cc", "memory" ); 370 : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
371 "m" (*(char *) addr) : "cc", "memory");
430 return (ch >> (nr & 7)) & 1; 372 return (ch >> (nr & 7)) & 1;
431} 373}
432#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) 374#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
@@ -442,10 +384,11 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
442 384
443 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 385 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
444 ch = *(unsigned char *) addr; 386 ch = *(unsigned char *) addr;
445 asm volatile("xc 0(1,%1),0(%2)" 387 asm volatile(
446 : "=m" (*(char *) addr) 388 " xc 0(1,%1),0(%2)"
447 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 389 : "=m" (*(char *) addr)
448 "m" (*(char *) addr) : "cc", "memory" ); 390 : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
391 "m" (*(char *) addr) : "cc", "memory");
449 return (ch >> (nr & 7)) & 1; 392 return (ch >> (nr & 7)) & 1;
450} 393}
451#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) 394#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
@@ -557,35 +500,36 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size)
557 500
558 if (!size) 501 if (!size)
559 return 0; 502 return 0;
560 __asm__(" lhi %1,-1\n" 503 asm volatile(
561 " lr %2,%3\n" 504 " lhi %1,-1\n"
562 " slr %0,%0\n" 505 " lr %2,%3\n"
563 " ahi %2,31\n" 506 " slr %0,%0\n"
564 " srl %2,5\n" 507 " ahi %2,31\n"
565 "0: c %1,0(%0,%4)\n" 508 " srl %2,5\n"
566 " jne 1f\n" 509 "0: c %1,0(%0,%4)\n"
567 " la %0,4(%0)\n" 510 " jne 1f\n"
568 " brct %2,0b\n" 511 " la %0,4(%0)\n"
569 " lr %0,%3\n" 512 " brct %2,0b\n"
570 " j 4f\n" 513 " lr %0,%3\n"
571 "1: l %2,0(%0,%4)\n" 514 " j 4f\n"
572 " sll %0,3\n" 515 "1: l %2,0(%0,%4)\n"
573 " lhi %1,0xff\n" 516 " sll %0,3\n"
574 " tml %2,0xffff\n" 517 " lhi %1,0xff\n"
575 " jno 2f\n" 518 " tml %2,0xffff\n"
576 " ahi %0,16\n" 519 " jno 2f\n"
577 " srl %2,16\n" 520 " ahi %0,16\n"
578 "2: tml %2,0x00ff\n" 521 " srl %2,16\n"
579 " jno 3f\n" 522 "2: tml %2,0x00ff\n"
580 " ahi %0,8\n" 523 " jno 3f\n"
581 " srl %2,8\n" 524 " ahi %0,8\n"
582 "3: nr %2,%1\n" 525 " srl %2,8\n"
583 " ic %2,0(%2,%5)\n" 526 "3: nr %2,%1\n"
584 " alr %0,%2\n" 527 " ic %2,0(%2,%5)\n"
585 "4:" 528 " alr %0,%2\n"
586 : "=&a" (res), "=&d" (cmp), "=&a" (count) 529 "4:"
587 : "a" (size), "a" (addr), "a" (&_zb_findmap), 530 : "=&a" (res), "=&d" (cmp), "=&a" (count)
588 "m" (*(addrtype *) addr) : "cc" ); 531 : "a" (size), "a" (addr), "a" (&_zb_findmap),
532 "m" (*(addrtype *) addr) : "cc");
589 return (res < size) ? res : size; 533 return (res < size) ? res : size;
590} 534}
591 535
@@ -598,35 +542,36 @@ find_first_bit(const unsigned long * addr, unsigned long size)
598 542
599 if (!size) 543 if (!size)
600 return 0; 544 return 0;
601 __asm__(" slr %1,%1\n" 545 asm volatile(
602 " lr %2,%3\n" 546 " slr %1,%1\n"
603 " slr %0,%0\n" 547 " lr %2,%3\n"
604 " ahi %2,31\n" 548 " slr %0,%0\n"
605 " srl %2,5\n" 549 " ahi %2,31\n"
606 "0: c %1,0(%0,%4)\n" 550 " srl %2,5\n"
607 " jne 1f\n" 551 "0: c %1,0(%0,%4)\n"
608 " la %0,4(%0)\n" 552 " jne 1f\n"
609 " brct %2,0b\n" 553 " la %0,4(%0)\n"
610 " lr %0,%3\n" 554 " brct %2,0b\n"
611 " j 4f\n" 555 " lr %0,%3\n"
612 "1: l %2,0(%0,%4)\n" 556 " j 4f\n"
613 " sll %0,3\n" 557 "1: l %2,0(%0,%4)\n"
614 " lhi %1,0xff\n" 558 " sll %0,3\n"
615 " tml %2,0xffff\n" 559 " lhi %1,0xff\n"
616 " jnz 2f\n" 560 " tml %2,0xffff\n"
617 " ahi %0,16\n" 561 " jnz 2f\n"
618 " srl %2,16\n" 562 " ahi %0,16\n"
619 "2: tml %2,0x00ff\n" 563 " srl %2,16\n"
620 " jnz 3f\n" 564 "2: tml %2,0x00ff\n"
621 " ahi %0,8\n" 565 " jnz 3f\n"
622 " srl %2,8\n" 566 " ahi %0,8\n"
623 "3: nr %2,%1\n" 567 " srl %2,8\n"
624 " ic %2,0(%2,%5)\n" 568 "3: nr %2,%1\n"
625 " alr %0,%2\n" 569 " ic %2,0(%2,%5)\n"
626 "4:" 570 " alr %0,%2\n"
627 : "=&a" (res), "=&d" (cmp), "=&a" (count) 571 "4:"
628 : "a" (size), "a" (addr), "a" (&_sb_findmap), 572 : "=&a" (res), "=&d" (cmp), "=&a" (count)
629 "m" (*(addrtype *) addr) : "cc" ); 573 : "a" (size), "a" (addr), "a" (&_sb_findmap),
574 "m" (*(addrtype *) addr) : "cc");
630 return (res < size) ? res : size; 575 return (res < size) ? res : size;
631} 576}
632 577
@@ -640,39 +585,40 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size)
640 585
641 if (!size) 586 if (!size)
642 return 0; 587 return 0;
643 __asm__(" lghi %1,-1\n" 588 asm volatile(
644 " lgr %2,%3\n" 589 " lghi %1,-1\n"
645 " slgr %0,%0\n" 590 " lgr %2,%3\n"
646 " aghi %2,63\n" 591 " slgr %0,%0\n"
647 " srlg %2,%2,6\n" 592 " aghi %2,63\n"
648 "0: cg %1,0(%0,%4)\n" 593 " srlg %2,%2,6\n"
649 " jne 1f\n" 594 "0: cg %1,0(%0,%4)\n"
650 " la %0,8(%0)\n" 595 " jne 1f\n"
651 " brct %2,0b\n" 596 " la %0,8(%0)\n"
652 " lgr %0,%3\n" 597 " brct %2,0b\n"
653 " j 5f\n" 598 " lgr %0,%3\n"
654 "1: lg %2,0(%0,%4)\n" 599 " j 5f\n"
655 " sllg %0,%0,3\n" 600 "1: lg %2,0(%0,%4)\n"
656 " clr %2,%1\n" 601 " sllg %0,%0,3\n"
657 " jne 2f\n" 602 " clr %2,%1\n"
658 " aghi %0,32\n" 603 " jne 2f\n"
659 " srlg %2,%2,32\n" 604 " aghi %0,32\n"
660 "2: lghi %1,0xff\n" 605 " srlg %2,%2,32\n"
661 " tmll %2,0xffff\n" 606 "2: lghi %1,0xff\n"
662 " jno 3f\n" 607 " tmll %2,0xffff\n"
663 " aghi %0,16\n" 608 " jno 3f\n"
664 " srl %2,16\n" 609 " aghi %0,16\n"
665 "3: tmll %2,0x00ff\n" 610 " srl %2,16\n"
666 " jno 4f\n" 611 "3: tmll %2,0x00ff\n"
667 " aghi %0,8\n" 612 " jno 4f\n"
668 " srl %2,8\n" 613 " aghi %0,8\n"
669 "4: ngr %2,%1\n" 614 " srl %2,8\n"
670 " ic %2,0(%2,%5)\n" 615 "4: ngr %2,%1\n"
671 " algr %0,%2\n" 616 " ic %2,0(%2,%5)\n"
672 "5:" 617 " algr %0,%2\n"
673 : "=&a" (res), "=&d" (cmp), "=&a" (count) 618 "5:"
619 : "=&a" (res), "=&d" (cmp), "=&a" (count)
674 : "a" (size), "a" (addr), "a" (&_zb_findmap), 620 : "a" (size), "a" (addr), "a" (&_zb_findmap),
675 "m" (*(addrtype *) addr) : "cc" ); 621 "m" (*(addrtype *) addr) : "cc");
676 return (res < size) ? res : size; 622 return (res < size) ? res : size;
677} 623}
678 624
@@ -684,39 +630,40 @@ find_first_bit(const unsigned long * addr, unsigned long size)
684 630
685 if (!size) 631 if (!size)
686 return 0; 632 return 0;
687 __asm__(" slgr %1,%1\n" 633 asm volatile(
688 " lgr %2,%3\n" 634 " slgr %1,%1\n"
689 " slgr %0,%0\n" 635 " lgr %2,%3\n"
690 " aghi %2,63\n" 636 " slgr %0,%0\n"
691 " srlg %2,%2,6\n" 637 " aghi %2,63\n"
692 "0: cg %1,0(%0,%4)\n" 638 " srlg %2,%2,6\n"
693 " jne 1f\n" 639 "0: cg %1,0(%0,%4)\n"
694 " aghi %0,8\n" 640 " jne 1f\n"
695 " brct %2,0b\n" 641 " aghi %0,8\n"
696 " lgr %0,%3\n" 642 " brct %2,0b\n"
697 " j 5f\n" 643 " lgr %0,%3\n"
698 "1: lg %2,0(%0,%4)\n" 644 " j 5f\n"
699 " sllg %0,%0,3\n" 645 "1: lg %2,0(%0,%4)\n"
700 " clr %2,%1\n" 646 " sllg %0,%0,3\n"
701 " jne 2f\n" 647 " clr %2,%1\n"
702 " aghi %0,32\n" 648 " jne 2f\n"
703 " srlg %2,%2,32\n" 649 " aghi %0,32\n"
704 "2: lghi %1,0xff\n" 650 " srlg %2,%2,32\n"
705 " tmll %2,0xffff\n" 651 "2: lghi %1,0xff\n"
706 " jnz 3f\n" 652 " tmll %2,0xffff\n"
707 " aghi %0,16\n" 653 " jnz 3f\n"
708 " srl %2,16\n" 654 " aghi %0,16\n"
709 "3: tmll %2,0x00ff\n" 655 " srl %2,16\n"
710 " jnz 4f\n" 656 "3: tmll %2,0x00ff\n"
711 " aghi %0,8\n" 657 " jnz 4f\n"
712 " srl %2,8\n" 658 " aghi %0,8\n"
713 "4: ngr %2,%1\n" 659 " srl %2,8\n"
714 " ic %2,0(%2,%5)\n" 660 "4: ngr %2,%1\n"
715 " algr %0,%2\n" 661 " ic %2,0(%2,%5)\n"
716 "5:" 662 " algr %0,%2\n"
717 : "=&a" (res), "=&d" (cmp), "=&a" (count) 663 "5:"
664 : "=&a" (res), "=&d" (cmp), "=&a" (count)
718 : "a" (size), "a" (addr), "a" (&_sb_findmap), 665 : "a" (size), "a" (addr), "a" (&_sb_findmap),
719 "m" (*(addrtype *) addr) : "cc" ); 666 "m" (*(addrtype *) addr) : "cc");
720 return (res < size) ? res : size; 667 return (res < size) ? res : size;
721} 668}
722 669
@@ -832,36 +779,37 @@ ext2_find_first_zero_bit(void *vaddr, unsigned int size)
832 779
833 if (!size) 780 if (!size)
834 return 0; 781 return 0;
835 __asm__(" lhi %1,-1\n" 782 asm volatile(
836 " lr %2,%3\n" 783 " lhi %1,-1\n"
837 " ahi %2,31\n" 784 " lr %2,%3\n"
838 " srl %2,5\n" 785 " ahi %2,31\n"
839 " slr %0,%0\n" 786 " srl %2,5\n"
840 "0: cl %1,0(%0,%4)\n" 787 " slr %0,%0\n"
841 " jne 1f\n" 788 "0: cl %1,0(%0,%4)\n"
842 " ahi %0,4\n" 789 " jne 1f\n"
843 " brct %2,0b\n" 790 " ahi %0,4\n"
844 " lr %0,%3\n" 791 " brct %2,0b\n"
845 " j 4f\n" 792 " lr %0,%3\n"
846 "1: l %2,0(%0,%4)\n" 793 " j 4f\n"
847 " sll %0,3\n" 794 "1: l %2,0(%0,%4)\n"
848 " ahi %0,24\n" 795 " sll %0,3\n"
849 " lhi %1,0xff\n" 796 " ahi %0,24\n"
850 " tmh %2,0xffff\n" 797 " lhi %1,0xff\n"
851 " jo 2f\n" 798 " tmh %2,0xffff\n"
852 " ahi %0,-16\n" 799 " jo 2f\n"
853 " srl %2,16\n" 800 " ahi %0,-16\n"
854 "2: tml %2,0xff00\n" 801 " srl %2,16\n"
855 " jo 3f\n" 802 "2: tml %2,0xff00\n"
856 " ahi %0,-8\n" 803 " jo 3f\n"
857 " srl %2,8\n" 804 " ahi %0,-8\n"
858 "3: nr %2,%1\n" 805 " srl %2,8\n"
859 " ic %2,0(%2,%5)\n" 806 "3: nr %2,%1\n"
860 " alr %0,%2\n" 807 " ic %2,0(%2,%5)\n"
861 "4:" 808 " alr %0,%2\n"
862 : "=&a" (res), "=&d" (cmp), "=&a" (count) 809 "4:"
863 : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 810 : "=&a" (res), "=&d" (cmp), "=&a" (count)
864 "m" (*(addrtype *) vaddr) : "cc" ); 811 : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
812 "m" (*(addrtype *) vaddr) : "cc");
865 return (res < size) ? res : size; 813 return (res < size) ? res : size;
866} 814}
867 815
@@ -875,39 +823,40 @@ ext2_find_first_zero_bit(void *vaddr, unsigned long size)
875 823
876 if (!size) 824 if (!size)
877 return 0; 825 return 0;
878 __asm__(" lghi %1,-1\n" 826 asm volatile(
879 " lgr %2,%3\n" 827 " lghi %1,-1\n"
880 " aghi %2,63\n" 828 " lgr %2,%3\n"
881 " srlg %2,%2,6\n" 829 " aghi %2,63\n"
882 " slgr %0,%0\n" 830 " srlg %2,%2,6\n"
883 "0: clg %1,0(%0,%4)\n" 831 " slgr %0,%0\n"
884 " jne 1f\n" 832 "0: clg %1,0(%0,%4)\n"
885 " aghi %0,8\n" 833 " jne 1f\n"
886 " brct %2,0b\n" 834 " aghi %0,8\n"
887 " lgr %0,%3\n" 835 " brct %2,0b\n"
888 " j 5f\n" 836 " lgr %0,%3\n"
889 "1: cl %1,0(%0,%4)\n" 837 " j 5f\n"
890 " jne 2f\n" 838 "1: cl %1,0(%0,%4)\n"
891 " aghi %0,4\n" 839 " jne 2f\n"
892 "2: l %2,0(%0,%4)\n" 840 " aghi %0,4\n"
893 " sllg %0,%0,3\n" 841 "2: l %2,0(%0,%4)\n"
894 " aghi %0,24\n" 842 " sllg %0,%0,3\n"
895 " lghi %1,0xff\n" 843 " aghi %0,24\n"
896 " tmlh %2,0xffff\n" 844 " lghi %1,0xff\n"
897 " jo 3f\n" 845 " tmlh %2,0xffff\n"
898 " aghi %0,-16\n" 846 " jo 3f\n"
899 " srl %2,16\n" 847 " aghi %0,-16\n"
900 "3: tmll %2,0xff00\n" 848 " srl %2,16\n"
901 " jo 4f\n" 849 "3: tmll %2,0xff00\n"
902 " aghi %0,-8\n" 850 " jo 4f\n"
903 " srl %2,8\n" 851 " aghi %0,-8\n"
904 "4: ngr %2,%1\n" 852 " srl %2,8\n"
905 " ic %2,0(%2,%5)\n" 853 "4: ngr %2,%1\n"
906 " algr %0,%2\n" 854 " ic %2,0(%2,%5)\n"
907 "5:" 855 " algr %0,%2\n"
908 : "=&a" (res), "=&d" (cmp), "=&a" (count) 856 "5:"
857 : "=&a" (res), "=&d" (cmp), "=&a" (count)
909 : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 858 : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
910 "m" (*(addrtype *) vaddr) : "cc" ); 859 "m" (*(addrtype *) vaddr) : "cc");
911 return (res < size) ? res : size; 860 return (res < size) ? res : size;
912} 861}
913 862
@@ -927,13 +876,16 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
927 p = addr + offset / __BITOPS_WORDSIZE; 876 p = addr + offset / __BITOPS_WORDSIZE;
928 if (bit) { 877 if (bit) {
929#ifndef __s390x__ 878#ifndef __s390x__
930 asm(" ic %0,0(%1)\n" 879 asm volatile(
931 " icm %0,2,1(%1)\n" 880 " ic %0,0(%1)\n"
932 " icm %0,4,2(%1)\n" 881 " icm %0,2,1(%1)\n"
933 " icm %0,8,3(%1)" 882 " icm %0,4,2(%1)\n"
934 : "=&a" (word) : "a" (p), "m" (*p) : "cc" ); 883 " icm %0,8,3(%1)"
884 : "=&a" (word) : "a" (p), "m" (*p) : "cc");
935#else 885#else
936 asm(" lrvg %0,%1" : "=a" (word) : "m" (*p) ); 886 asm volatile(
887 " lrvg %0,%1"
888 : "=a" (word) : "m" (*p) );
937#endif 889#endif
938 /* 890 /*
939 * s390 version of ffz returns __BITOPS_WORDSIZE 891 * s390 version of ffz returns __BITOPS_WORDSIZE
diff --git a/include/asm-s390/byteorder.h b/include/asm-s390/byteorder.h
index 2cc35a0e188e..1fe2492baa8d 100644
--- a/include/asm-s390/byteorder.h
+++ b/include/asm-s390/byteorder.h
@@ -14,60 +14,54 @@
14#ifdef __GNUC__ 14#ifdef __GNUC__
15 15
16#ifdef __s390x__ 16#ifdef __s390x__
17static __inline__ __u64 ___arch__swab64p(const __u64 *x) 17static inline __u64 ___arch__swab64p(const __u64 *x)
18{ 18{
19 __u64 result; 19 __u64 result;
20 20
21 __asm__ __volatile__ ( 21 asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
22 " lrvg %0,%1"
23 : "=d" (result) : "m" (*x) );
24 return result; 22 return result;
25} 23}
26 24
27static __inline__ __u64 ___arch__swab64(__u64 x) 25static inline __u64 ___arch__swab64(__u64 x)
28{ 26{
29 __u64 result; 27 __u64 result;
30 28
31 __asm__ __volatile__ ( 29 asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
32 " lrvgr %0,%1"
33 : "=d" (result) : "d" (x) );
34 return result; 30 return result;
35} 31}
36 32
37static __inline__ void ___arch__swab64s(__u64 *x) 33static inline void ___arch__swab64s(__u64 *x)
38{ 34{
39 *x = ___arch__swab64p(x); 35 *x = ___arch__swab64p(x);
40} 36}
41#endif /* __s390x__ */ 37#endif /* __s390x__ */
42 38
43static __inline__ __u32 ___arch__swab32p(const __u32 *x) 39static inline __u32 ___arch__swab32p(const __u32 *x)
44{ 40{
45 __u32 result; 41 __u32 result;
46 42
47 __asm__ __volatile__ ( 43 asm volatile(
48#ifndef __s390x__ 44#ifndef __s390x__
49 " icm %0,8,3(%1)\n" 45 " icm %0,8,3(%1)\n"
50 " icm %0,4,2(%1)\n" 46 " icm %0,4,2(%1)\n"
51 " icm %0,2,1(%1)\n" 47 " icm %0,2,1(%1)\n"
52 " ic %0,0(%1)" 48 " ic %0,0(%1)"
53 : "=&d" (result) : "a" (x), "m" (*x) : "cc" ); 49 : "=&d" (result) : "a" (x), "m" (*x) : "cc");
54#else /* __s390x__ */ 50#else /* __s390x__ */
55 " lrv %0,%1" 51 " lrv %0,%1"
56 : "=d" (result) : "m" (*x) ); 52 : "=d" (result) : "m" (*x));
57#endif /* __s390x__ */ 53#endif /* __s390x__ */
58 return result; 54 return result;
59} 55}
60 56
61static __inline__ __u32 ___arch__swab32(__u32 x) 57static inline __u32 ___arch__swab32(__u32 x)
62{ 58{
63#ifndef __s390x__ 59#ifndef __s390x__
64 return ___arch__swab32p(&x); 60 return ___arch__swab32p(&x);
65#else /* __s390x__ */ 61#else /* __s390x__ */
66 __u32 result; 62 __u32 result;
67 63
68 __asm__ __volatile__ ( 64 asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
69 " lrvr %0,%1"
70 : "=d" (result) : "d" (x) );
71 return result; 65 return result;
72#endif /* __s390x__ */ 66#endif /* __s390x__ */
73} 67}
@@ -81,14 +75,14 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
81{ 75{
82 __u16 result; 76 __u16 result;
83 77
84 __asm__ __volatile__ ( 78 asm volatile(
85#ifndef __s390x__ 79#ifndef __s390x__
86 " icm %0,2,1(%1)\n" 80 " icm %0,2,1(%1)\n"
87 " ic %0,0(%1)\n" 81 " ic %0,0(%1)\n"
88 : "=&d" (result) : "a" (x), "m" (*x) : "cc" ); 82 : "=&d" (result) : "a" (x), "m" (*x) : "cc");
89#else /* __s390x__ */ 83#else /* __s390x__ */
90 " lrvh %0,%1" 84 " lrvh %0,%1"
91 : "=d" (result) : "m" (*x) ); 85 : "=d" (result) : "m" (*x));
92#endif /* __s390x__ */ 86#endif /* __s390x__ */
93 return result; 87 return result;
94} 88}
diff --git a/include/asm-s390/checksum.h b/include/asm-s390/checksum.h
index 471f2af2b16a..37c362d89fad 100644
--- a/include/asm-s390/checksum.h
+++ b/include/asm-s390/checksum.h
@@ -30,57 +30,13 @@
30static inline unsigned int 30static inline unsigned int
31csum_partial(const unsigned char * buff, int len, unsigned int sum) 31csum_partial(const unsigned char * buff, int len, unsigned int sum)
32{ 32{
33 /* 33 register unsigned long reg2 asm("2") = (unsigned long) buff;
34 * Experiments with ethernet and slip connections show that buf 34 register unsigned long reg3 asm("3") = (unsigned long) len;
35 * is aligned on either a 2-byte or 4-byte boundary.
36 */
37#ifndef __s390x__
38 register_pair rp;
39
40 rp.subreg.even = (unsigned long) buff;
41 rp.subreg.odd = (unsigned long) len;
42 __asm__ __volatile__ (
43 "0: cksm %0,%1\n" /* do checksum on longs */
44 " jo 0b\n"
45 : "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
46#else /* __s390x__ */
47 __asm__ __volatile__ (
48 " lgr 2,%1\n" /* address in gpr 2 */
49 " lgfr 3,%2\n" /* length in gpr 3 */
50 "0: cksm %0,2\n" /* do checksum on longs */
51 " jo 0b\n"
52 : "+&d" (sum)
53 : "d" (buff), "d" (len)
54 : "cc", "memory", "2", "3" );
55#endif /* __s390x__ */
56 return sum;
57}
58
59/*
60 * csum_partial as an inline function
61 */
62static inline unsigned int
63csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
64{
65#ifndef __s390x__
66 register_pair rp;
67 35
68 rp.subreg.even = (unsigned long) buff; 36 asm volatile(
69 rp.subreg.odd = (unsigned long) len; 37 "0: cksm %0,%1\n" /* do checksum on longs */
70 __asm__ __volatile__ ( 38 " jo 0b\n"
71 "0: cksm %0,%1\n" /* do checksum on longs */ 39 : "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
72 " jo 0b\n"
73 : "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
74#else /* __s390x__ */
75 __asm__ __volatile__ (
76 " lgr 2,%1\n" /* address in gpr 2 */
77 " lgfr 3,%2\n" /* length in gpr 3 */
78 "0: cksm %0,2\n" /* do checksum on longs */
79 " jo 0b\n"
80 : "+&d" (sum)
81 : "d" (buff), "d" (len)
82 : "cc", "memory", "2", "3" );
83#endif /* __s390x__ */
84 return sum; 40 return sum;
85} 41}
86 42
@@ -114,7 +70,7 @@ static inline unsigned int
114csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum) 70csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
115{ 71{
116 memcpy(dst,src,len); 72 memcpy(dst,src,len);
117 return csum_partial_inline(dst, len, sum); 73 return csum_partial(dst, len, sum);
118} 74}
119 75
120/* 76/*
@@ -126,22 +82,22 @@ csum_fold(unsigned int sum)
126#ifndef __s390x__ 82#ifndef __s390x__
127 register_pair rp; 83 register_pair rp;
128 84
129 __asm__ __volatile__ ( 85 asm volatile(
130 " slr %N1,%N1\n" /* %0 = H L */ 86 " slr %N1,%N1\n" /* %0 = H L */
131 " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ 87 " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */
132 " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ 88 " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */
133 " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ 89 " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */
134 " alr %0,%1\n" /* %0 = H+L+C L+H */ 90 " alr %0,%1\n" /* %0 = H+L+C L+H */
135 " srl %0,16\n" /* %0 = H+L+C */ 91 " srl %0,16\n" /* %0 = H+L+C */
136 : "+&d" (sum), "=d" (rp) : : "cc" ); 92 : "+&d" (sum), "=d" (rp) : : "cc");
137#else /* __s390x__ */ 93#else /* __s390x__ */
138 __asm__ __volatile__ ( 94 asm volatile(
139 " sr 3,3\n" /* %0 = H*65536 + L */ 95 " sr 3,3\n" /* %0 = H*65536 + L */
140 " lr 2,%0\n" /* %0 = H L, R2/R3 = H L / 0 0 */ 96 " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */
141 " srdl 2,16\n" /* %0 = H L, R2/R3 = 0 H / L 0 */ 97 " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */
142 " alr 2,3\n" /* %0 = H L, R2/R3 = L H / L 0 */ 98 " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */
143 " alr %0,2\n" /* %0 = H+L+C L+H */ 99 " alr %0,2\n" /* %0 = H+L+C L+H */
144 " srl %0,16\n" /* %0 = H+L+C */ 100 " srl %0,16\n" /* %0 = H+L+C */
145 : "+&d" (sum) : : "cc", "2", "3"); 101 : "+&d" (sum) : : "cc", "2", "3");
146#endif /* __s390x__ */ 102#endif /* __s390x__ */
147 return ((unsigned short) ~sum); 103 return ((unsigned short) ~sum);
@@ -155,29 +111,7 @@ csum_fold(unsigned int sum)
155static inline unsigned short 111static inline unsigned short
156ip_fast_csum(unsigned char *iph, unsigned int ihl) 112ip_fast_csum(unsigned char *iph, unsigned int ihl)
157{ 113{
158 unsigned long sum; 114 return csum_fold(csum_partial(iph, ihl*4, 0));
159#ifndef __s390x__
160 register_pair rp;
161
162 rp.subreg.even = (unsigned long) iph;
163 rp.subreg.odd = (unsigned long) ihl*4;
164 __asm__ __volatile__ (
165 " sr %0,%0\n" /* set sum to zero */
166 "0: cksm %0,%1\n" /* do checksum on longs */
167 " jo 0b\n"
168 : "=&d" (sum), "+&a" (rp) : : "cc", "memory" );
169#else /* __s390x__ */
170 __asm__ __volatile__ (
171 " slgr %0,%0\n" /* set sum to zero */
172 " lgr 2,%1\n" /* address in gpr 2 */
173 " lgfr 3,%2\n" /* length in gpr 3 */
174 "0: cksm %0,2\n" /* do checksum on ints */
175 " jo 0b\n"
176 : "=&d" (sum)
177 : "d" (iph), "d" (ihl*4)
178 : "cc", "memory", "2", "3" );
179#endif /* __s390x__ */
180 return csum_fold(sum);
181} 115}
182 116
183/* 117/*
@@ -190,47 +124,47 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
190 unsigned int sum) 124 unsigned int sum)
191{ 125{
192#ifndef __s390x__ 126#ifndef __s390x__
193 __asm__ __volatile__ ( 127 asm volatile(
194 " alr %0,%1\n" /* sum += saddr */ 128 " alr %0,%1\n" /* sum += saddr */
195 " brc 12,0f\n" 129 " brc 12,0f\n"
196 " ahi %0,1\n" /* add carry */ 130 " ahi %0,1\n" /* add carry */
197 "0:" 131 "0:"
198 : "+&d" (sum) : "d" (saddr) : "cc" ); 132 : "+&d" (sum) : "d" (saddr) : "cc");
199 __asm__ __volatile__ ( 133 asm volatile(
200 " alr %0,%1\n" /* sum += daddr */ 134 " alr %0,%1\n" /* sum += daddr */
201 " brc 12,1f\n" 135 " brc 12,1f\n"
202 " ahi %0,1\n" /* add carry */ 136 " ahi %0,1\n" /* add carry */
203 "1:" 137 "1:"
204 : "+&d" (sum) : "d" (daddr) : "cc" ); 138 : "+&d" (sum) : "d" (daddr) : "cc");
205 __asm__ __volatile__ ( 139 asm volatile(
206 " alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */ 140 " alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */
207 " brc 12,2f\n" 141 " brc 12,2f\n"
208 " ahi %0,1\n" /* add carry */ 142 " ahi %0,1\n" /* add carry */
209 "2:" 143 "2:"
210 : "+&d" (sum) 144 : "+&d" (sum)
211 : "d" (((unsigned int) len<<16) + (unsigned int) proto) 145 : "d" (((unsigned int) len<<16) + (unsigned int) proto)
212 : "cc" ); 146 : "cc");
213#else /* __s390x__ */ 147#else /* __s390x__ */
214 __asm__ __volatile__ ( 148 asm volatile(
215 " lgfr %0,%0\n" 149 " lgfr %0,%0\n"
216 " algr %0,%1\n" /* sum += saddr */ 150 " algr %0,%1\n" /* sum += saddr */
217 " brc 12,0f\n" 151 " brc 12,0f\n"
218 " aghi %0,1\n" /* add carry */ 152 " aghi %0,1\n" /* add carry */
219 "0: algr %0,%2\n" /* sum += daddr */ 153 "0: algr %0,%2\n" /* sum += daddr */
220 " brc 12,1f\n" 154 " brc 12,1f\n"
221 " aghi %0,1\n" /* add carry */ 155 " aghi %0,1\n" /* add carry */
222 "1: algfr %0,%3\n" /* sum += (len<<16) + proto */ 156 "1: algfr %0,%3\n" /* sum += (len<<16) + proto */
223 " brc 12,2f\n" 157 " brc 12,2f\n"
224 " aghi %0,1\n" /* add carry */ 158 " aghi %0,1\n" /* add carry */
225 "2: srlg 0,%0,32\n" 159 "2: srlg 0,%0,32\n"
226 " alr %0,0\n" /* fold to 32 bits */ 160 " alr %0,0\n" /* fold to 32 bits */
227 " brc 12,3f\n" 161 " brc 12,3f\n"
228 " ahi %0,1\n" /* add carry */ 162 " ahi %0,1\n" /* add carry */
229 "3: llgfr %0,%0" 163 "3: llgfr %0,%0"
230 : "+&d" (sum) 164 : "+&d" (sum)
231 : "d" (saddr), "d" (daddr), 165 : "d" (saddr), "d" (daddr),
232 "d" (((unsigned int) len<<16) + (unsigned int) proto) 166 "d" (((unsigned int) len<<16) + (unsigned int) proto)
233 : "cc", "0" ); 167 : "cc", "0");
234#endif /* __s390x__ */ 168#endif /* __s390x__ */
235 return sum; 169 return sum;
236} 170}
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 28fdd6e2b8ba..da063cd5f0a0 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -270,6 +270,11 @@ struct diag210 {
270 __u32 vrdccrft : 8; /* real device feature (output) */ 270 __u32 vrdccrft : 8; /* real device feature (output) */
271} __attribute__ ((packed,aligned(4))); 271} __attribute__ ((packed,aligned(4)));
272 272
273struct ccw_dev_id {
274 u8 ssid;
275 u16 devno;
276};
277
273extern int diag210(struct diag210 *addr); 278extern int diag210(struct diag210 *addr);
274 279
275extern void wait_cons_dev(void); 280extern void wait_cons_dev(void);
@@ -280,6 +285,8 @@ extern void cio_reset_channel_paths(void);
280 285
281extern void css_schedule_reprobe(void); 286extern void css_schedule_reprobe(void);
282 287
288extern void reipl_ccw_dev(struct ccw_dev_id *id);
289
283#endif 290#endif
284 291
285#endif 292#endif
diff --git a/include/asm-s390/div64.h b/include/asm-s390/div64.h
index af098dc3cf59..6cd978cefb28 100644
--- a/include/asm-s390/div64.h
+++ b/include/asm-s390/div64.h
@@ -1,49 +1 @@
1#ifndef __S390_DIV64
2#define __S390_DIV64
3
4#ifndef __s390x__
5
6/* for do_div "base" needs to be smaller than 2^31-1 */
7#define do_div(n, base) ({ \
8 unsigned long long __n = (n); \
9 unsigned long __r; \
10 \
11 asm (" slr 0,0\n" \
12 " l 1,%1\n" \
13 " srdl 0,1\n" \
14 " dr 0,%2\n" \
15 " alr 1,1\n" \
16 " alr 0,0\n" \
17 " lhi 2,1\n" \
18 " n 2,%1\n" \
19 " alr 0,2\n" \
20 " clr 0,%2\n" \
21 " jl 0f\n" \
22 " slr 0,%2\n" \
23 " ahi 1,1\n" \
24 "0: st 1,%1\n" \
25 " l 1,4+%1\n" \
26 " srdl 0,1\n" \
27 " dr 0,%2\n" \
28 " alr 1,1\n" \
29 " alr 0,0\n" \
30 " lhi 2,1\n" \
31 " n 2,4+%1\n" \
32 " alr 0,2\n" \
33 " clr 0,%2\n" \
34 " jl 1f\n" \
35 " slr 0,%2\n" \
36 " ahi 1,1\n" \
37 "1: st 1,4+%1\n" \
38 " lr %0,0" \
39 : "=d" (__r), "=m" (__n) \
40 : "d" (base), "m" (__n) : "0", "1", "2", "cc" ); \
41 (n) = (__n); \
42 __r; \
43})
44
45#else /* __s390x__ */
46#include <asm-generic/div64.h> #include <asm-generic/div64.h>
47#endif /* __s390x__ */
48
49#endif
diff --git a/include/asm-s390/dma.h b/include/asm-s390/dma.h
index 02720c449cd8..7425c6af6cd4 100644
--- a/include/asm-s390/dma.h
+++ b/include/asm-s390/dma.h
@@ -11,6 +11,6 @@
11 11
12#define MAX_DMA_ADDRESS 0x80000000 12#define MAX_DMA_ADDRESS 0x80000000
13 13
14#define free_dma(x) 14#define free_dma(x) do { } while (0)
15 15
16#endif /* _ASM_DMA_H */ 16#endif /* _ASM_DMA_H */
diff --git a/include/asm-s390/ebcdic.h b/include/asm-s390/ebcdic.h
index 15fd2eda6c90..7f6f641d32f4 100644
--- a/include/asm-s390/ebcdic.h
+++ b/include/asm-s390/ebcdic.h
@@ -26,16 +26,16 @@ codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
26{ 26{
27 if (nr-- <= 0) 27 if (nr-- <= 0)
28 return; 28 return;
29 __asm__ __volatile__( 29 asm volatile(
30 " bras 1,1f\n" 30 " bras 1,1f\n"
31 " tr 0(1,%0),0(%2)\n" 31 " tr 0(1,%0),0(%2)\n"
32 "0: tr 0(256,%0),0(%2)\n" 32 "0: tr 0(256,%0),0(%2)\n"
33 " la %0,256(%0)\n" 33 " la %0,256(%0)\n"
34 "1: ahi %1,-256\n" 34 "1: ahi %1,-256\n"
35 " jnm 0b\n" 35 " jnm 0b\n"
36 " ex %1,0(1)" 36 " ex %1,0(1)"
37 : "+&a" (addr), "+&a" (nr) 37 : "+&a" (addr), "+&a" (nr)
38 : "a" (codepage) : "cc", "memory", "1" ); 38 : "a" (codepage) : "cc", "memory", "1");
39} 39}
40 40
41#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr) 41#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index ffedf14f89f6..5e261e1de671 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -7,75 +7,21 @@
7#include <asm/errno.h> 7#include <asm/errno.h>
8#include <asm/uaccess.h> 8#include <asm/uaccess.h>
9 9
10#ifndef __s390x__
11#define __futex_atomic_fixup \
12 ".section __ex_table,\"a\"\n" \
13 " .align 4\n" \
14 " .long 0b,4b,2b,4b,3b,4b\n" \
15 ".previous"
16#else /* __s390x__ */
17#define __futex_atomic_fixup \
18 ".section __ex_table,\"a\"\n" \
19 " .align 8\n" \
20 " .quad 0b,4b,2b,4b,3b,4b\n" \
21 ".previous"
22#endif /* __s390x__ */
23
24#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
25 asm volatile(" sacf 256\n" \
26 "0: l %1,0(%6)\n" \
27 "1: " insn \
28 "2: cs %1,%2,0(%6)\n" \
29 "3: jl 1b\n" \
30 " lhi %0,0\n" \
31 "4: sacf 0\n" \
32 __futex_atomic_fixup \
33 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
34 "=m" (*uaddr) \
35 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
36 "m" (*uaddr) : "cc" );
37
38static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) 10static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
39{ 11{
40 int op = (encoded_op >> 28) & 7; 12 int op = (encoded_op >> 28) & 7;
41 int cmp = (encoded_op >> 24) & 15; 13 int cmp = (encoded_op >> 24) & 15;
42 int oparg = (encoded_op << 8) >> 20; 14 int oparg = (encoded_op << 8) >> 20;
43 int cmparg = (encoded_op << 20) >> 20; 15 int cmparg = (encoded_op << 20) >> 20;
44 int oldval = 0, newval, ret; 16 int oldval, ret;
17
45 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
46 oparg = 1 << oparg; 19 oparg = 1 << oparg;
47 20
48 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) 21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
49 return -EFAULT; 22 return -EFAULT;
50 23
51 inc_preempt_count(); 24 ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
52
53 switch (op) {
54 case FUTEX_OP_SET:
55 __futex_atomic_op("lr %2,%5\n",
56 ret, oldval, newval, uaddr, oparg);
57 break;
58 case FUTEX_OP_ADD:
59 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
60 ret, oldval, newval, uaddr, oparg);
61 break;
62 case FUTEX_OP_OR:
63 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
64 ret, oldval, newval, uaddr, oparg);
65 break;
66 case FUTEX_OP_ANDN:
67 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
68 ret, oldval, newval, uaddr, oparg);
69 break;
70 case FUTEX_OP_XOR:
71 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
72 ret, oldval, newval, uaddr, oparg);
73 break;
74 default:
75 ret = -ENOSYS;
76 }
77
78 dec_preempt_count();
79 25
80 if (!ret) { 26 if (!ret) {
81 switch (cmp) { 27 switch (cmp) {
@@ -91,32 +37,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
91 return ret; 37 return ret;
92} 38}
93 39
94static inline int 40static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
95futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) 41 int oldval, int newval)
96{ 42{
97 int ret;
98
99 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) 43 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
100 return -EFAULT; 44 return -EFAULT;
101 asm volatile(" sacf 256\n" 45
102 " cs %1,%4,0(%5)\n" 46 return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
103 "0: lr %0,%1\n"
104 "1: sacf 0\n"
105#ifndef __s390x__
106 ".section __ex_table,\"a\"\n"
107 " .align 4\n"
108 " .long 0b,1b\n"
109 ".previous"
110#else /* __s390x__ */
111 ".section __ex_table,\"a\"\n"
112 " .align 8\n"
113 " .quad 0b,1b\n"
114 ".previous"
115#endif /* __s390x__ */
116 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
117 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
118 : "cc", "memory" );
119 return oldval;
120} 47}
121 48
122#endif /* __KERNEL__ */ 49#endif /* __KERNEL__ */
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
index d4614b35f423..63c78b9399c4 100644
--- a/include/asm-s390/io.h
+++ b/include/asm-s390/io.h
@@ -27,18 +27,16 @@
27static inline unsigned long virt_to_phys(volatile void * address) 27static inline unsigned long virt_to_phys(volatile void * address)
28{ 28{
29 unsigned long real_address; 29 unsigned long real_address;
30 __asm__ ( 30 asm volatile(
31#ifndef __s390x__ 31#ifndef __s390x__
32 " lra %0,0(%1)\n" 32 " lra %0,0(%1)\n"
33 " jz 0f\n"
34 " sr %0,%0\n"
35#else /* __s390x__ */ 33#else /* __s390x__ */
36 " lrag %0,0(%1)\n" 34 " lrag %0,0(%1)\n"
37 " jz 0f\n"
38 " slgr %0,%0\n"
39#endif /* __s390x__ */ 35#endif /* __s390x__ */
36 " jz 0f\n"
37 " la %0,0\n"
40 "0:" 38 "0:"
41 : "=a" (real_address) : "a" (address) : "cc" ); 39 : "=a" (real_address) : "a" (address) : "cc");
42 return real_address; 40 return real_address;
43} 41}
44 42
@@ -116,7 +114,7 @@ extern void iounmap(void *addr);
116#define outb(x,addr) ((void) writeb(x,addr)) 114#define outb(x,addr) ((void) writeb(x,addr))
117#define outb_p(x,addr) outb(x,addr) 115#define outb_p(x,addr) outb(x,addr)
118 116
119#define mmiowb() 117#define mmiowb() do { } while (0)
120 118
121/* 119/*
122 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 120 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/include/asm-s390/irq.h b/include/asm-s390/irq.h
index bd1a721f7aa2..7da991a858f8 100644
--- a/include/asm-s390/irq.h
+++ b/include/asm-s390/irq.h
@@ -19,8 +19,5 @@ enum interruption_class {
19 NR_IRQS, 19 NR_IRQS,
20}; 20};
21 21
22#define touch_nmi_watchdog() do { } while(0)
23
24#endif /* __KERNEL__ */ 22#endif /* __KERNEL__ */
25#endif 23#endif
26
diff --git a/include/asm-s390/irqflags.h b/include/asm-s390/irqflags.h
index 3b566a5b3cc7..3f26131120b7 100644
--- a/include/asm-s390/irqflags.h
+++ b/include/asm-s390/irqflags.h
@@ -10,43 +10,93 @@
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
14
15/* store then or system mask. */
16#define __raw_local_irq_stosm(__or) \
17({ \
18 unsigned long __mask; \
19 asm volatile( \
20 " stosm %0,%1" \
21 : "=Q" (__mask) : "i" (__or) : "memory"); \
22 __mask; \
23})
24
25/* store then and system mask. */
26#define __raw_local_irq_stnsm(__and) \
27({ \
28 unsigned long __mask; \
29 asm volatile( \
30 " stnsm %0,%1" \
31 : "=Q" (__mask) : "i" (__and) : "memory"); \
32 __mask; \
33})
34
35/* set system mask. */
36#define __raw_local_irq_ssm(__mask) \
37({ \
38 asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
39})
40
41#else /* __GNUC__ */
42
43/* store then or system mask. */
44#define __raw_local_irq_stosm(__or) \
45({ \
46 unsigned long __mask; \
47 asm volatile( \
48 " stosm 0(%1),%2" \
49 : "=m" (__mask) \
50 : "a" (&__mask), "i" (__or) : "memory"); \
51 __mask; \
52})
53
54/* store then and system mask. */
55#define __raw_local_irq_stnsm(__and) \
56({ \
57 unsigned long __mask; \
58 asm volatile( \
59 " stnsm 0(%1),%2" \
60 : "=m" (__mask) \
61 : "a" (&__mask), "i" (__and) : "memory"); \
62 __mask; \
63})
64
65/* set system mask. */
66#define __raw_local_irq_ssm(__mask) \
67({ \
68 asm volatile( \
69 " ssm 0(%0)" \
70 : : "a" (&__mask), "m" (__mask) : "memory"); \
71})
72
73#endif /* __GNUC__ */
74
13/* interrupt control.. */ 75/* interrupt control.. */
14#define raw_local_irq_enable() ({ \ 76static inline unsigned long raw_local_irq_enable(void)
15 unsigned long __dummy; \ 77{
16 __asm__ __volatile__ ( \ 78 return __raw_local_irq_stosm(0x03);
17 "stosm 0(%1),0x03" \ 79}
18 : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
19 })
20
21#define raw_local_irq_disable() ({ \
22 unsigned long __flags; \
23 __asm__ __volatile__ ( \
24 "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
25 __flags; \
26 })
27
28#define raw_local_save_flags(x) \
29do { \
30 typecheck(unsigned long, x); \
31 __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) ); \
32} while (0)
33 80
34#define raw_local_irq_restore(x) \ 81static inline unsigned long raw_local_irq_disable(void)
35do { \ 82{
36 typecheck(unsigned long, x); \ 83 return __raw_local_irq_stnsm(0xfc);
37 __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory"); \ 84}
85
86#define raw_local_save_flags(x) \
87do { \
88 typecheck(unsigned long, x); \
89 (x) = __raw_local_irq_stosm(0x00); \
38} while (0) 90} while (0)
39 91
40#define raw_irqs_disabled() \ 92static inline void raw_local_irq_restore(unsigned long flags)
41({ \ 93{
42 unsigned long flags; \ 94 __raw_local_irq_ssm(flags);
43 raw_local_save_flags(flags); \ 95}
44 !((flags >> __FLAG_SHIFT) & 3); \
45})
46 96
47static inline int raw_irqs_disabled_flags(unsigned long flags) 97static inline int raw_irqs_disabled_flags(unsigned long flags)
48{ 98{
49 return !((flags >> __FLAG_SHIFT) & 3); 99 return !(flags & (3UL << (BITS_PER_LONG - 8)));
50} 100}
51 101
52/* For spinlocks etc */ 102/* For spinlocks etc */
diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h
new file mode 100644
index 000000000000..40cc68025e01
--- /dev/null
+++ b/include/asm-s390/kdebug.h
@@ -0,0 +1,59 @@
1#ifndef _S390_KDEBUG_H
2#define _S390_KDEBUG_H
3
4/*
5 * Feb 2006 Ported to s390 <grundym@us.ibm.com>
6 */
7#include <linux/notifier.h>
8
9struct pt_regs;
10
11struct die_args {
12 struct pt_regs *regs;
13 const char *str;
14 long err;
15 int trapnr;
16 int signr;
17};
18
19/* Note - you should never unregister because that can race with NMIs.
20 * If you really want to do it first unregister - then synchronize_sched
21 * - then free.
22 */
23extern int register_die_notifier(struct notifier_block *);
24extern int unregister_die_notifier(struct notifier_block *);
25extern int register_page_fault_notifier(struct notifier_block *);
26extern int unregister_page_fault_notifier(struct notifier_block *);
27extern struct atomic_notifier_head s390die_chain;
28
29
30enum die_val {
31 DIE_OOPS = 1,
32 DIE_BPT,
33 DIE_SSTEP,
34 DIE_PANIC,
35 DIE_NMI,
36 DIE_DIE,
37 DIE_NMIWATCHDOG,
38 DIE_KERNELDEBUG,
39 DIE_TRAP,
40 DIE_GPF,
41 DIE_CALL,
42 DIE_NMI_IPI,
43 DIE_PAGE_FAULT,
44};
45
46static inline int notify_die(enum die_val val, const char *str,
47 struct pt_regs *regs, long err, int trap, int sig)
48{
49 struct die_args args = {
50 .regs = regs,
51 .str = str,
52 .err = err,
53 .trapnr = trap,
54 .signr = sig
55 };
56 return atomic_notifier_call_chain(&s390die_chain, val, &args);
57}
58
59#endif
diff --git a/include/asm-s390/kprobes.h b/include/asm-s390/kprobes.h
new file mode 100644
index 000000000000..b847ff0ec3fa
--- /dev/null
+++ b/include/asm-s390/kprobes.h
@@ -0,0 +1,114 @@
1#ifndef _ASM_S390_KPROBES_H
2#define _ASM_S390_KPROBES_H
3/*
4 * Kernel Probes (KProbes)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright (C) IBM Corporation, 2002, 2006
21 *
22 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
23 * Probes initial implementation ( includes suggestions from
24 * Rusty Russell).
25 * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli
26 * <ananth@in.ibm.com>
27 * 2005-Dec Used as a template for s390 by Mike Grundy
28 * <grundym@us.ibm.com>
29 */
30#include <linux/types.h>
31#include <linux/ptrace.h>
32#include <linux/percpu.h>
33
34#define __ARCH_WANT_KPROBES_INSN_SLOT
35struct pt_regs;
36struct kprobe;
37
38typedef u16 kprobe_opcode_t;
39#define BREAKPOINT_INSTRUCTION 0x0002
40
41/* Maximum instruction size is 3 (16bit) halfwords: */
42#define MAX_INSN_SIZE 0x0003
43#define MAX_STACK_SIZE 64
44#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
45 (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
46 ? (MAX_STACK_SIZE) \
47 : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
48
49#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)(pentry)
50
51#define ARCH_SUPPORTS_KRETPROBES
52#define ARCH_INACTIVE_KPROBE_COUNT 0
53
54#define KPROBE_SWAP_INST 0x10
55
56#define FIXUP_PSW_NORMAL 0x08
57#define FIXUP_BRANCH_NOT_TAKEN 0x04
58#define FIXUP_RETURN_REGISTER 0x02
59#define FIXUP_NOT_REQUIRED 0x01
60
61/* Architecture specific copy of original instruction */
62struct arch_specific_insn {
63 /* copy of original instruction */
64 kprobe_opcode_t *insn;
65 int fixup;
66 int ilen;
67 int reg;
68};
69
70struct ins_replace_args {
71 kprobe_opcode_t *ptr;
72 kprobe_opcode_t old;
73 kprobe_opcode_t new;
74};
75struct prev_kprobe {
76 struct kprobe *kp;
77 unsigned long status;
78 unsigned long saved_psw;
79 unsigned long kprobe_saved_imask;
80 unsigned long kprobe_saved_ctl[3];
81};
82
83/* per-cpu kprobe control block */
84struct kprobe_ctlblk {
85 unsigned long kprobe_status;
86 unsigned long kprobe_saved_imask;
87 unsigned long kprobe_saved_ctl[3];
88 struct pt_regs jprobe_saved_regs;
89 unsigned long jprobe_saved_r14;
90 unsigned long jprobe_saved_r15;
91 struct prev_kprobe prev_kprobe;
92 kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
93};
94
95void arch_remove_kprobe(struct kprobe *p);
96void kretprobe_trampoline(void);
97int is_prohibited_opcode(kprobe_opcode_t *instruction);
98void get_instruction_type(struct arch_specific_insn *ainsn);
99
100#define flush_insn_slot(p) do { } while (0)
101
102#endif /* _ASM_S390_KPROBES_H */
103
104#ifdef CONFIG_KPROBES
105
106extern int kprobe_exceptions_notify(struct notifier_block *self,
107 unsigned long val, void *data);
108#else /* !CONFIG_KPROBES */
109static inline int kprobe_exceptions_notify(struct notifier_block *self,
110 unsigned long val, void *data)
111{
112 return 0;
113}
114#endif
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 596c8b172104..06583ed0bde7 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -35,6 +35,7 @@
35#define __LC_IO_NEW_PSW 0x01f0 35#define __LC_IO_NEW_PSW 0x01f0
36#endif /* !__s390x__ */ 36#endif /* !__s390x__ */
37 37
38#define __LC_IPL_PARMBLOCK_PTR 0x014
38#define __LC_EXT_PARAMS 0x080 39#define __LC_EXT_PARAMS 0x080
39#define __LC_CPU_ADDRESS 0x084 40#define __LC_CPU_ADDRESS 0x084
40#define __LC_EXT_INT_CODE 0x086 41#define __LC_EXT_INT_CODE 0x086
@@ -47,6 +48,7 @@
47#define __LC_PER_ATMID 0x096 48#define __LC_PER_ATMID 0x096
48#define __LC_PER_ADDRESS 0x098 49#define __LC_PER_ADDRESS 0x098
49#define __LC_PER_ACCESS_ID 0x0A1 50#define __LC_PER_ACCESS_ID 0x0A1
51#define __LC_AR_MODE_ID 0x0A3
50 52
51#define __LC_SUBCHANNEL_ID 0x0B8 53#define __LC_SUBCHANNEL_ID 0x0B8
52#define __LC_SUBCHANNEL_NR 0x0BA 54#define __LC_SUBCHANNEL_NR 0x0BA
@@ -106,18 +108,28 @@
106#define __LC_INT_CLOCK 0xDE8 108#define __LC_INT_CLOCK 0xDE8
107#endif /* __s390x__ */ 109#endif /* __s390x__ */
108 110
109#define __LC_PANIC_MAGIC 0xE00
110 111
112#define __LC_PANIC_MAGIC 0xE00
111#ifndef __s390x__ 113#ifndef __s390x__
112#define __LC_PFAULT_INTPARM 0x080 114#define __LC_PFAULT_INTPARM 0x080
113#define __LC_CPU_TIMER_SAVE_AREA 0x0D8 115#define __LC_CPU_TIMER_SAVE_AREA 0x0D8
116#define __LC_CLOCK_COMP_SAVE_AREA 0x0E0
117#define __LC_PSW_SAVE_AREA 0x100
118#define __LC_PREFIX_SAVE_AREA 0x108
114#define __LC_AREGS_SAVE_AREA 0x120 119#define __LC_AREGS_SAVE_AREA 0x120
120#define __LC_FPREGS_SAVE_AREA 0x160
115#define __LC_GPREGS_SAVE_AREA 0x180 121#define __LC_GPREGS_SAVE_AREA 0x180
116#define __LC_CREGS_SAVE_AREA 0x1C0 122#define __LC_CREGS_SAVE_AREA 0x1C0
117#else /* __s390x__ */ 123#else /* __s390x__ */
118#define __LC_PFAULT_INTPARM 0x11B8 124#define __LC_PFAULT_INTPARM 0x11B8
125#define __LC_FPREGS_SAVE_AREA 0x1200
119#define __LC_GPREGS_SAVE_AREA 0x1280 126#define __LC_GPREGS_SAVE_AREA 0x1280
127#define __LC_PSW_SAVE_AREA 0x1300
128#define __LC_PREFIX_SAVE_AREA 0x1318
129#define __LC_FP_CREG_SAVE_AREA 0x131C
130#define __LC_TODREG_SAVE_AREA 0x1324
120#define __LC_CPU_TIMER_SAVE_AREA 0x1328 131#define __LC_CPU_TIMER_SAVE_AREA 0x1328
132#define __LC_CLOCK_COMP_SAVE_AREA 0x1331
121#define __LC_AREGS_SAVE_AREA 0x1340 133#define __LC_AREGS_SAVE_AREA 0x1340
122#define __LC_CREGS_SAVE_AREA 0x1380 134#define __LC_CREGS_SAVE_AREA 0x1380
123#endif /* __s390x__ */ 135#endif /* __s390x__ */
@@ -347,7 +359,7 @@ extern struct _lowcore *lowcore_ptr[];
347 359
348static inline void set_prefix(__u32 address) 360static inline void set_prefix(__u32 address)
349{ 361{
350 __asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" ); 362 asm volatile("spx %0" : : "m" (address) : "memory");
351} 363}
352 364
353#define __PANIC_MAGIC 0xDEADC0DE 365#define __PANIC_MAGIC 0xDEADC0DE
diff --git a/include/asm-s390/monwriter.h b/include/asm-s390/monwriter.h
new file mode 100644
index 000000000000..f0cbf96c52e6
--- /dev/null
+++ b/include/asm-s390/monwriter.h
@@ -0,0 +1,33 @@
1/*
2 * include/asm-s390/monwriter.h
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Character device driver for writing z/VM APPLDATA monitor records
6 * Version 1.0
7 * Author(s): Melissa Howland <melissah@us.ibm.com>
8 *
9 */
10
11#ifndef _ASM_390_MONWRITER_H
12#define _ASM_390_MONWRITER_H
13
14/* mon_function values */
15#define MONWRITE_START_INTERVAL 0x00 /* start interval recording */
16#define MONWRITE_STOP_INTERVAL 0x01 /* stop interval or config recording */
17#define MONWRITE_GEN_EVENT 0x02 /* generate event record */
18#define MONWRITE_START_CONFIG 0x03 /* start configuration recording */
19
20/* the header the app uses in its write() data */
21struct monwrite_hdr {
22 unsigned char mon_function;
23 unsigned short applid;
24 unsigned char record_num;
25 unsigned short version;
26 unsigned short release;
27 unsigned short mod_level;
28 unsigned short datalen;
29 unsigned char hdrlen;
30
31} __attribute__((packed));
32
33#endif /* _ASM_390_MONWRITER_H */
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index b2628dc5c490..796c400f2b79 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -22,89 +22,45 @@
22#include <asm/setup.h> 22#include <asm/setup.h>
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24 24
25#ifndef __s390x__
26
27static inline void clear_page(void *page)
28{
29 register_pair rp;
30
31 rp.subreg.even = (unsigned long) page;
32 rp.subreg.odd = (unsigned long) 4096;
33 asm volatile (" slr 1,1\n"
34 " mvcl %0,0"
35 : "+&a" (rp) : : "memory", "cc", "1" );
36}
37
38static inline void copy_page(void *to, void *from)
39{
40 if (MACHINE_HAS_MVPG)
41 asm volatile (" sr 0,0\n"
42 " mvpg %0,%1"
43 : : "a" ((void *)(to)), "a" ((void *)(from))
44 : "memory", "cc", "0" );
45 else
46 asm volatile (" mvc 0(256,%0),0(%1)\n"
47 " mvc 256(256,%0),256(%1)\n"
48 " mvc 512(256,%0),512(%1)\n"
49 " mvc 768(256,%0),768(%1)\n"
50 " mvc 1024(256,%0),1024(%1)\n"
51 " mvc 1280(256,%0),1280(%1)\n"
52 " mvc 1536(256,%0),1536(%1)\n"
53 " mvc 1792(256,%0),1792(%1)\n"
54 " mvc 2048(256,%0),2048(%1)\n"
55 " mvc 2304(256,%0),2304(%1)\n"
56 " mvc 2560(256,%0),2560(%1)\n"
57 " mvc 2816(256,%0),2816(%1)\n"
58 " mvc 3072(256,%0),3072(%1)\n"
59 " mvc 3328(256,%0),3328(%1)\n"
60 " mvc 3584(256,%0),3584(%1)\n"
61 " mvc 3840(256,%0),3840(%1)\n"
62 : : "a"((void *)(to)),"a"((void *)(from))
63 : "memory" );
64}
65
66#else /* __s390x__ */
67
68static inline void clear_page(void *page) 25static inline void clear_page(void *page)
69{ 26{
70 asm volatile (" lgr 2,%0\n" 27 register unsigned long reg1 asm ("1") = 0;
71 " lghi 3,4096\n" 28 register void *reg2 asm ("2") = page;
72 " slgr 1,1\n" 29 register unsigned long reg3 asm ("3") = 4096;
73 " mvcl 2,0" 30 asm volatile(
74 : : "a" ((void *) (page)) 31 " mvcl 2,0"
75 : "memory", "cc", "1", "2", "3" ); 32 : "+d" (reg2), "+d" (reg3) : "d" (reg1) : "memory", "cc");
76} 33}
77 34
78static inline void copy_page(void *to, void *from) 35static inline void copy_page(void *to, void *from)
79{ 36{
80 if (MACHINE_HAS_MVPG) 37 if (MACHINE_HAS_MVPG) {
81 asm volatile (" sgr 0,0\n" 38 register unsigned long reg0 asm ("0") = 0;
82 " mvpg %0,%1" 39 asm volatile(
83 : : "a" ((void *)(to)), "a" ((void *)(from)) 40 " mvpg %0,%1"
84 : "memory", "cc", "0" ); 41 : : "a" (to), "a" (from), "d" (reg0)
85 else 42 : "memory", "cc");
86 asm volatile (" mvc 0(256,%0),0(%1)\n" 43 } else
87 " mvc 256(256,%0),256(%1)\n" 44 asm volatile(
88 " mvc 512(256,%0),512(%1)\n" 45 " mvc 0(256,%0),0(%1)\n"
89 " mvc 768(256,%0),768(%1)\n" 46 " mvc 256(256,%0),256(%1)\n"
90 " mvc 1024(256,%0),1024(%1)\n" 47 " mvc 512(256,%0),512(%1)\n"
91 " mvc 1280(256,%0),1280(%1)\n" 48 " mvc 768(256,%0),768(%1)\n"
92 " mvc 1536(256,%0),1536(%1)\n" 49 " mvc 1024(256,%0),1024(%1)\n"
93 " mvc 1792(256,%0),1792(%1)\n" 50 " mvc 1280(256,%0),1280(%1)\n"
94 " mvc 2048(256,%0),2048(%1)\n" 51 " mvc 1536(256,%0),1536(%1)\n"
95 " mvc 2304(256,%0),2304(%1)\n" 52 " mvc 1792(256,%0),1792(%1)\n"
96 " mvc 2560(256,%0),2560(%1)\n" 53 " mvc 2048(256,%0),2048(%1)\n"
97 " mvc 2816(256,%0),2816(%1)\n" 54 " mvc 2304(256,%0),2304(%1)\n"
98 " mvc 3072(256,%0),3072(%1)\n" 55 " mvc 2560(256,%0),2560(%1)\n"
99 " mvc 3328(256,%0),3328(%1)\n" 56 " mvc 2816(256,%0),2816(%1)\n"
100 " mvc 3584(256,%0),3584(%1)\n" 57 " mvc 3072(256,%0),3072(%1)\n"
101 " mvc 3840(256,%0),3840(%1)\n" 58 " mvc 3328(256,%0),3328(%1)\n"
102 : : "a"((void *)(to)),"a"((void *)(from)) 59 " mvc 3584(256,%0),3584(%1)\n"
103 : "memory" ); 60 " mvc 3840(256,%0),3840(%1)\n"
61 : : "a" (to), "a" (from) : "memory");
104} 62}
105 63
106#endif /* __s390x__ */
107
108#define clear_user_page(page, vaddr, pg) clear_page(page) 64#define clear_user_page(page, vaddr, pg) clear_page(page)
109#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 65#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
110 66
@@ -159,7 +115,7 @@ extern unsigned int default_storage_key;
159static inline void 115static inline void
160page_set_storage_key(unsigned long addr, unsigned int skey) 116page_set_storage_key(unsigned long addr, unsigned int skey)
161{ 117{
162 asm volatile ( "sske %0,%1" : : "d" (skey), "a" (addr) ); 118 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
163} 119}
164 120
165static inline unsigned int 121static inline unsigned int
@@ -167,8 +123,7 @@ page_get_storage_key(unsigned long addr)
167{ 123{
168 unsigned int skey; 124 unsigned int skey;
169 125
170 asm volatile ( "iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0) ); 126 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0));
171
172 return skey; 127 return skey;
173} 128}
174 129
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 28b3517e787c..495ad99c7635 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -15,18 +15,20 @@
15 */ 15 */
16#if defined(__s390x__) && defined(MODULE) 16#if defined(__s390x__) && defined(MODULE)
17 17
18#define __reloc_hide(var,offset) \ 18#define __reloc_hide(var,offset) (*({ \
19 (*({ unsigned long *__ptr; \ 19 extern int simple_indentifier_##var(void); \
20 asm ( "larl %0,per_cpu__"#var"@GOTENT" \ 20 unsigned long *__ptr; \
21 : "=a" (__ptr) : "X" (per_cpu__##var) ); \ 21 asm ( "larl %0,per_cpu__"#var"@GOTENT" \
22 (typeof(&per_cpu__##var))((*__ptr) + (offset)); })) 22 : "=a" (__ptr) : "X" (per_cpu__##var) ); \
23 (typeof(&per_cpu__##var))((*__ptr) + (offset)); }))
23 24
24#else 25#else
25 26
26#define __reloc_hide(var, offset) \ 27#define __reloc_hide(var, offset) (*({ \
27 (*({ unsigned long __ptr; \ 28 extern int simple_indentifier_##var(void); \
28 asm ( "" : "=a" (__ptr) : "0" (&per_cpu__##var) ); \ 29 unsigned long __ptr; \
29 (typeof(&per_cpu__##var)) (__ptr + (offset)); })) 30 asm ( "" : "=a" (__ptr) : "0" (&per_cpu__##var) ); \
31 (typeof(&per_cpu__##var)) (__ptr + (offset)); }))
30 32
31#endif 33#endif
32 34
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index a78e853e0dd5..803bc7064418 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -22,6 +22,16 @@
22extern void diag10(unsigned long addr); 22extern void diag10(unsigned long addr);
23 23
24/* 24/*
25 * Page allocation orders.
26 */
27#ifndef __s390x__
28# define PGD_ALLOC_ORDER 1
29#else /* __s390x__ */
30# define PMD_ALLOC_ORDER 2
31# define PGD_ALLOC_ORDER 2
32#endif /* __s390x__ */
33
34/*
25 * Allocate and free page tables. The xxx_kernel() versions are 35 * Allocate and free page tables. The xxx_kernel() versions are
26 * used to allocate a kernel page table - this turns on ASN bits 36 * used to allocate a kernel page table - this turns on ASN bits
27 * if any. 37 * if any.
@@ -29,30 +39,23 @@ extern void diag10(unsigned long addr);
29 39
30static inline pgd_t *pgd_alloc(struct mm_struct *mm) 40static inline pgd_t *pgd_alloc(struct mm_struct *mm)
31{ 41{
32 pgd_t *pgd; 42 pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
33 int i; 43 int i;
34 44
45 if (!pgd)
46 return NULL;
47 for (i = 0; i < PTRS_PER_PGD; i++)
35#ifndef __s390x__ 48#ifndef __s390x__
36 pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1); 49 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
37 if (pgd != NULL) 50#else
38 for (i = 0; i < USER_PTRS_PER_PGD; i++) 51 pgd_clear(pgd + i);
39 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); 52#endif
40#else /* __s390x__ */
41 pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2);
42 if (pgd != NULL)
43 for (i = 0; i < PTRS_PER_PGD; i++)
44 pgd_clear(pgd + i);
45#endif /* __s390x__ */
46 return pgd; 53 return pgd;
47} 54}
48 55
49static inline void pgd_free(pgd_t *pgd) 56static inline void pgd_free(pgd_t *pgd)
50{ 57{
51#ifndef __s390x__ 58 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
52 free_pages((unsigned long) pgd, 1);
53#else /* __s390x__ */
54 free_pages((unsigned long) pgd, 2);
55#endif /* __s390x__ */
56} 59}
57 60
58#ifndef __s390x__ 61#ifndef __s390x__
@@ -68,20 +71,19 @@ static inline void pgd_free(pgd_t *pgd)
68#else /* __s390x__ */ 71#else /* __s390x__ */
69static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 72static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
70{ 73{
71 pmd_t *pmd; 74 pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
72 int i; 75 int i;
73 76
74 pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 2); 77 if (!pmd)
75 if (pmd != NULL) { 78 return NULL;
76 for (i=0; i < PTRS_PER_PMD; i++) 79 for (i=0; i < PTRS_PER_PMD; i++)
77 pmd_clear(pmd+i); 80 pmd_clear(pmd + i);
78 }
79 return pmd; 81 return pmd;
80} 82}
81 83
82static inline void pmd_free (pmd_t *pmd) 84static inline void pmd_free (pmd_t *pmd)
83{ 85{
84 free_pages((unsigned long) pmd, 2); 86 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
85} 87}
86 88
87#define __pmd_free_tlb(tlb,pmd) \ 89#define __pmd_free_tlb(tlb,pmd) \
@@ -123,15 +125,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
123static inline pte_t * 125static inline pte_t *
124pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) 126pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
125{ 127{
126 pte_t *pte; 128 pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
127 int i; 129 int i;
128 130
129 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); 131 if (!pte)
130 if (pte != NULL) { 132 return NULL;
131 for (i=0; i < PTRS_PER_PTE; i++) { 133 for (i=0; i < PTRS_PER_PTE; i++) {
132 pte_clear(mm, vmaddr, pte+i); 134 pte_clear(mm, vmaddr, pte + i);
133 vmaddr += PAGE_SIZE; 135 vmaddr += PAGE_SIZE;
134 }
135 } 136 }
136 return pte; 137 return pte;
137} 138}
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 24312387fa24..ecdff13b2505 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -31,9 +31,9 @@
31 * the S390 page table tree. 31 * the S390 page table tree.
32 */ 32 */
33#ifndef __ASSEMBLY__ 33#ifndef __ASSEMBLY__
34#include <linux/mm_types.h>
34#include <asm/bug.h> 35#include <asm/bug.h>
35#include <asm/processor.h> 36#include <asm/processor.h>
36#include <linux/threads.h>
37 37
38struct vm_area_struct; /* forward declaration (include/linux/mm.h) */ 38struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
39struct mm_struct; 39struct mm_struct;
@@ -89,19 +89,6 @@ extern char empty_zero_page[PAGE_SIZE];
89# define PTRS_PER_PGD 2048 89# define PTRS_PER_PGD 2048
90#endif /* __s390x__ */ 90#endif /* __s390x__ */
91 91
92/*
93 * pgd entries used up by user/kernel:
94 */
95#ifndef __s390x__
96# define USER_PTRS_PER_PGD 512
97# define USER_PGD_PTRS 512
98# define KERNEL_PGD_PTRS 512
99#else /* __s390x__ */
100# define USER_PTRS_PER_PGD 2048
101# define USER_PGD_PTRS 2048
102# define KERNEL_PGD_PTRS 2048
103#endif /* __s390x__ */
104
105#define FIRST_USER_ADDRESS 0 92#define FIRST_USER_ADDRESS 0
106 93
107#define pte_ERROR(e) \ 94#define pte_ERROR(e) \
@@ -216,12 +203,14 @@ extern char empty_zero_page[PAGE_SIZE];
216#define _PAGE_RO 0x200 /* HW read-only */ 203#define _PAGE_RO 0x200 /* HW read-only */
217#define _PAGE_INVALID 0x400 /* HW invalid */ 204#define _PAGE_INVALID 0x400 /* HW invalid */
218 205
219/* Mask and four different kinds of invalid pages. */ 206/* Mask and six different types of pages. */
220#define _PAGE_INVALID_MASK 0x601 207#define _PAGE_TYPE_MASK 0x601
221#define _PAGE_INVALID_EMPTY 0x400 208#define _PAGE_TYPE_EMPTY 0x400
222#define _PAGE_INVALID_NONE 0x401 209#define _PAGE_TYPE_NONE 0x401
223#define _PAGE_INVALID_SWAP 0x600 210#define _PAGE_TYPE_SWAP 0x600
224#define _PAGE_INVALID_FILE 0x601 211#define _PAGE_TYPE_FILE 0x601
212#define _PAGE_TYPE_RO 0x200
213#define _PAGE_TYPE_RW 0x000
225 214
226#ifndef __s390x__ 215#ifndef __s390x__
227 216
@@ -280,15 +269,14 @@ extern char empty_zero_page[PAGE_SIZE];
280#endif /* __s390x__ */ 269#endif /* __s390x__ */
281 270
282/* 271/*
283 * No mapping available 272 * Page protection definitions.
284 */ 273 */
285#define PAGE_NONE_SHARED __pgprot(_PAGE_INVALID_NONE) 274#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
286#define PAGE_NONE_PRIVATE __pgprot(_PAGE_INVALID_NONE) 275#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
287#define PAGE_RO_SHARED __pgprot(_PAGE_RO) 276#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
288#define PAGE_RO_PRIVATE __pgprot(_PAGE_RO) 277
289#define PAGE_COPY __pgprot(_PAGE_RO) 278#define PAGE_KERNEL PAGE_RW
290#define PAGE_SHARED __pgprot(0) 279#define PAGE_COPY PAGE_RO
291#define PAGE_KERNEL __pgprot(0)
292 280
293/* 281/*
294 * The S390 can't do page protection for execute, and considers that the 282 * The S390 can't do page protection for execute, and considers that the
@@ -296,23 +284,23 @@ extern char empty_zero_page[PAGE_SIZE];
296 * the closest we can get.. 284 * the closest we can get..
297 */ 285 */
298 /*xwr*/ 286 /*xwr*/
299#define __P000 PAGE_NONE_PRIVATE 287#define __P000 PAGE_NONE
300#define __P001 PAGE_RO_PRIVATE 288#define __P001 PAGE_RO
301#define __P010 PAGE_COPY 289#define __P010 PAGE_RO
302#define __P011 PAGE_COPY 290#define __P011 PAGE_RO
303#define __P100 PAGE_RO_PRIVATE 291#define __P100 PAGE_RO
304#define __P101 PAGE_RO_PRIVATE 292#define __P101 PAGE_RO
305#define __P110 PAGE_COPY 293#define __P110 PAGE_RO
306#define __P111 PAGE_COPY 294#define __P111 PAGE_RO
307 295
308#define __S000 PAGE_NONE_SHARED 296#define __S000 PAGE_NONE
309#define __S001 PAGE_RO_SHARED 297#define __S001 PAGE_RO
310#define __S010 PAGE_SHARED 298#define __S010 PAGE_RW
311#define __S011 PAGE_SHARED 299#define __S011 PAGE_RW
312#define __S100 PAGE_RO_SHARED 300#define __S100 PAGE_RO
313#define __S101 PAGE_RO_SHARED 301#define __S101 PAGE_RO
314#define __S110 PAGE_SHARED 302#define __S110 PAGE_RW
315#define __S111 PAGE_SHARED 303#define __S111 PAGE_RW
316 304
317/* 305/*
318 * Certain architectures need to do special things when PTEs 306 * Certain architectures need to do special things when PTEs
@@ -377,18 +365,18 @@ static inline int pmd_bad(pmd_t pmd)
377 365
378static inline int pte_none(pte_t pte) 366static inline int pte_none(pte_t pte)
379{ 367{
380 return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_EMPTY; 368 return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_EMPTY;
381} 369}
382 370
383static inline int pte_present(pte_t pte) 371static inline int pte_present(pte_t pte)
384{ 372{
385 return !(pte_val(pte) & _PAGE_INVALID) || 373 return !(pte_val(pte) & _PAGE_INVALID) ||
386 (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_NONE; 374 (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_NONE;
387} 375}
388 376
389static inline int pte_file(pte_t pte) 377static inline int pte_file(pte_t pte)
390{ 378{
391 return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_FILE; 379 return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_FILE;
392} 380}
393 381
394#define pte_same(a,b) (pte_val(a) == pte_val(b)) 382#define pte_same(a,b) (pte_val(a) == pte_val(b))
@@ -461,7 +449,7 @@ static inline void pmd_clear(pmd_t * pmdp)
461 449
462static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 450static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
463{ 451{
464 pte_val(*ptep) = _PAGE_INVALID_EMPTY; 452 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
465} 453}
466 454
467/* 455/*
@@ -477,7 +465,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
477 465
478static inline pte_t pte_wrprotect(pte_t pte) 466static inline pte_t pte_wrprotect(pte_t pte)
479{ 467{
480 /* Do not clobber _PAGE_INVALID_NONE pages! */ 468 /* Do not clobber _PAGE_TYPE_NONE pages! */
481 if (!(pte_val(pte) & _PAGE_INVALID)) 469 if (!(pte_val(pte) & _PAGE_INVALID))
482 pte_val(pte) |= _PAGE_RO; 470 pte_val(pte) |= _PAGE_RO;
483 return pte; 471 return pte;
@@ -556,26 +544,31 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
556 return pte; 544 return pte;
557} 545}
558 546
559static inline pte_t 547static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
560ptep_clear_flush(struct vm_area_struct *vma,
561 unsigned long address, pte_t *ptep)
562{ 548{
563 pte_t pte = *ptep; 549 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
564#ifndef __s390x__ 550#ifndef __s390x__
565 if (!(pte_val(pte) & _PAGE_INVALID)) {
566 /* S390 has 1mb segments, we are emulating 4MB segments */ 551 /* S390 has 1mb segments, we are emulating 4MB segments */
567 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); 552 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
568 __asm__ __volatile__ ("ipte %2,%3" 553#else
569 : "=m" (*ptep) : "m" (*ptep), 554 /* ipte in zarch mode can do the math */
570 "a" (pto), "a" (address) ); 555 pte_t *pto = ptep;
556#endif
557 asm volatile(
558 " ipte %2,%3"
559 : "=m" (*ptep) : "m" (*ptep),
560 "a" (pto), "a" (address));
571 } 561 }
572#else /* __s390x__ */ 562 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
573 if (!(pte_val(pte) & _PAGE_INVALID)) 563}
574 __asm__ __volatile__ ("ipte %2,%3" 564
575 : "=m" (*ptep) : "m" (*ptep), 565static inline pte_t
576 "a" (ptep), "a" (address) ); 566ptep_clear_flush(struct vm_area_struct *vma,
577#endif /* __s390x__ */ 567 unsigned long address, pte_t *ptep)
578 pte_val(*ptep) = _PAGE_INVALID_EMPTY; 568{
569 pte_t pte = *ptep;
570
571 __ptep_ipte(address, ptep);
579 return pte; 572 return pte;
580} 573}
581 574
@@ -604,30 +597,31 @@ ptep_establish(struct vm_area_struct *vma,
604 * should therefore only be called if it is not mapped in any 597 * should therefore only be called if it is not mapped in any
605 * address space. 598 * address space.
606 */ 599 */
607#define page_test_and_clear_dirty(_page) \ 600static inline int page_test_and_clear_dirty(struct page *page)
608({ \ 601{
609 struct page *__page = (_page); \ 602 unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
610 unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \ 603 int skey = page_get_storage_key(physpage);
611 int __skey = page_get_storage_key(__physpage); \ 604
612 if (__skey & _PAGE_CHANGED) \ 605 if (skey & _PAGE_CHANGED)
613 page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\ 606 page_set_storage_key(physpage, skey & ~_PAGE_CHANGED);
614 (__skey & _PAGE_CHANGED); \ 607 return skey & _PAGE_CHANGED;
615}) 608}
616 609
617/* 610/*
618 * Test and clear referenced bit in storage key. 611 * Test and clear referenced bit in storage key.
619 */ 612 */
620#define page_test_and_clear_young(page) \ 613static inline int page_test_and_clear_young(struct page *page)
621({ \ 614{
622 struct page *__page = (page); \ 615 unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
623 unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \ 616 int ccode;
624 int __ccode; \ 617
625 asm volatile ("rrbe 0,%1\n\t" \ 618 asm volatile (
626 "ipm %0\n\t" \ 619 "rrbe 0,%1\n"
627 "srl %0,28\n\t" \ 620 "ipm %0\n"
628 : "=d" (__ccode) : "a" (__physpage) : "cc" ); \ 621 "srl %0,28\n"
629 (__ccode & 2); \ 622 : "=d" (ccode) : "a" (physpage) : "cc" );
630}) 623 return ccode & 2;
624}
631 625
632/* 626/*
633 * Conversion functions: convert a page and protection to a page entry, 627 * Conversion functions: convert a page and protection to a page entry,
@@ -640,43 +634,41 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
640 return __pte; 634 return __pte;
641} 635}
642 636
643#define mk_pte(pg, pgprot) \ 637static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
644({ \ 638{
645 struct page *__page = (pg); \ 639 unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
646 pgprot_t __pgprot = (pgprot); \
647 unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
648 pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
649 __pte; \
650})
651 640
652#define pfn_pte(pfn, pgprot) \ 641 return mk_pte_phys(physpage, pgprot);
653({ \ 642}
654 pgprot_t __pgprot = (pgprot); \ 643
655 unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \ 644static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
656 pte_t __pte = mk_pte_phys(__physpage, __pgprot); \ 645{
657 __pte; \ 646 unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
658}) 647
648 return mk_pte_phys(physpage, pgprot);
649}
659 650
660#ifdef __s390x__ 651#ifdef __s390x__
661 652
662#define pfn_pmd(pfn, pgprot) \ 653static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
663({ \ 654{
664 pgprot_t __pgprot = (pgprot); \ 655 unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
665 unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \ 656
666 pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot)); \ 657 return __pmd(physpage + pgprot_val(pgprot));
667 __pmd; \ 658}
668})
669 659
670#endif /* __s390x__ */ 660#endif /* __s390x__ */
671 661
672#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) 662#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
673#define pte_page(x) pfn_to_page(pte_pfn(x)) 663#define pte_page(x) pfn_to_page(pte_pfn(x))
674 664
675#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK) 665#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
676 666
677#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT)) 667#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
678 668
679#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK) 669#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)
670
671#define pgd_page(pgd) (mem_map+(pgd_val(pgd) >> PAGE_SHIFT))
680 672
681/* to find an entry in a page-table-directory */ 673/* to find an entry in a page-table-directory */
682#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 674#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
@@ -698,14 +690,14 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
698/* Find an entry in the second-level page table.. */ 690/* Find an entry in the second-level page table.. */
699#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 691#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
700#define pmd_offset(dir,addr) \ 692#define pmd_offset(dir,addr) \
701 ((pmd_t *) pgd_page_kernel(*(dir)) + pmd_index(addr)) 693 ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr))
702 694
703#endif /* __s390x__ */ 695#endif /* __s390x__ */
704 696
705/* Find an entry in the third-level page table.. */ 697/* Find an entry in the third-level page table.. */
706#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) 698#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
707#define pte_offset_kernel(pmd, address) \ 699#define pte_offset_kernel(pmd, address) \
708 ((pte_t *) pmd_page_kernel(*(pmd)) + pte_index(address)) 700 ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
709#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) 701#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
710#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) 702#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
711#define pte_unmap(pte) do { } while (0) 703#define pte_unmap(pte) do { } while (0)
@@ -755,7 +747,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
755{ 747{
756 pte_t pte; 748 pte_t pte;
757 offset &= __SWP_OFFSET_MASK; 749 offset &= __SWP_OFFSET_MASK;
758 pte_val(pte) = _PAGE_INVALID_SWAP | ((type & 0x1f) << 2) | 750 pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
759 ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); 751 ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
760 return pte; 752 return pte;
761} 753}
@@ -778,7 +770,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
778 770
779#define pgoff_to_pte(__off) \ 771#define pgoff_to_pte(__off) \
780 ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ 772 ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
781 | _PAGE_INVALID_FILE }) 773 | _PAGE_TYPE_FILE })
782 774
783#endif /* !__ASSEMBLY__ */ 775#endif /* !__ASSEMBLY__ */
784 776
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 5b71d3731723..cbbedc63ba25 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -13,7 +13,6 @@
13#ifndef __ASM_S390_PROCESSOR_H 13#ifndef __ASM_S390_PROCESSOR_H
14#define __ASM_S390_PROCESSOR_H 14#define __ASM_S390_PROCESSOR_H
15 15
16#include <asm/page.h>
17#include <asm/ptrace.h> 16#include <asm/ptrace.h>
18 17
19#ifdef __KERNEL__ 18#ifdef __KERNEL__
@@ -21,7 +20,7 @@
21 * Default implementation of macro that returns current 20 * Default implementation of macro that returns current
22 * instruction pointer ("program counter"). 21 * instruction pointer ("program counter").
23 */ 22 */
24#define current_text_addr() ({ void *pc; __asm__("basr %0,0":"=a"(pc)); pc; }) 23#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
25 24
26/* 25/*
27 * CPU type and hardware bug flags. Kept separately for each CPU. 26 * CPU type and hardware bug flags. Kept separately for each CPU.
@@ -202,7 +201,7 @@ unsigned long get_wchan(struct task_struct *p);
202static inline void cpu_relax(void) 201static inline void cpu_relax(void)
203{ 202{
204 if (MACHINE_HAS_DIAG44) 203 if (MACHINE_HAS_DIAG44)
205 asm volatile ("diag 0,0,68" : : : "memory"); 204 asm volatile("diag 0,0,68" : : : "memory");
206 else 205 else
207 barrier(); 206 barrier();
208} 207}
@@ -213,9 +212,9 @@ static inline void cpu_relax(void)
213static inline void __load_psw(psw_t psw) 212static inline void __load_psw(psw_t psw)
214{ 213{
215#ifndef __s390x__ 214#ifndef __s390x__
216 asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 215 asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
217#else 216#else
218 asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 217 asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
219#endif 218#endif
220} 219}
221 220
@@ -232,20 +231,20 @@ static inline void __load_psw_mask (unsigned long mask)
232 psw.mask = mask; 231 psw.mask = mask;
233 232
234#ifndef __s390x__ 233#ifndef __s390x__
235 asm volatile ( 234 asm volatile(
236 " basr %0,0\n" 235 " basr %0,0\n"
237 "0: ahi %0,1f-0b\n" 236 "0: ahi %0,1f-0b\n"
238 " st %0,4(%1)\n" 237 " st %0,4(%1)\n"
239 " lpsw 0(%1)\n" 238 " lpsw 0(%1)\n"
240 "1:" 239 "1:"
241 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" ); 240 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc");
242#else /* __s390x__ */ 241#else /* __s390x__ */
243 asm volatile ( 242 asm volatile(
244 " larl %0,1f\n" 243 " larl %0,1f\n"
245 " stg %0,8(%1)\n" 244 " stg %0,8(%1)\n"
246 " lpswe 0(%1)\n" 245 " lpswe 0(%1)\n"
247 "1:" 246 "1:"
248 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" ); 247 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc");
249#endif /* __s390x__ */ 248#endif /* __s390x__ */
250} 249}
251 250
@@ -274,56 +273,57 @@ static inline void disabled_wait(unsigned long code)
274 * the processor is dead afterwards 273 * the processor is dead afterwards
275 */ 274 */
276#ifndef __s390x__ 275#ifndef __s390x__
277 asm volatile (" stctl 0,0,0(%2)\n" 276 asm volatile(
278 " ni 0(%2),0xef\n" /* switch off protection */ 277 " stctl 0,0,0(%2)\n"
279 " lctl 0,0,0(%2)\n" 278 " ni 0(%2),0xef\n" /* switch off protection */
280 " stpt 0xd8\n" /* store timer */ 279 " lctl 0,0,0(%2)\n"
281 " stckc 0xe0\n" /* store clock comparator */ 280 " stpt 0xd8\n" /* store timer */
282 " stpx 0x108\n" /* store prefix register */ 281 " stckc 0xe0\n" /* store clock comparator */
283 " stam 0,15,0x120\n" /* store access registers */ 282 " stpx 0x108\n" /* store prefix register */
284 " std 0,0x160\n" /* store f0 */ 283 " stam 0,15,0x120\n" /* store access registers */
285 " std 2,0x168\n" /* store f2 */ 284 " std 0,0x160\n" /* store f0 */
286 " std 4,0x170\n" /* store f4 */ 285 " std 2,0x168\n" /* store f2 */
287 " std 6,0x178\n" /* store f6 */ 286 " std 4,0x170\n" /* store f4 */
288 " stm 0,15,0x180\n" /* store general registers */ 287 " std 6,0x178\n" /* store f6 */
289 " stctl 0,15,0x1c0\n" /* store control registers */ 288 " stm 0,15,0x180\n" /* store general registers */
290 " oi 0x1c0,0x10\n" /* fake protection bit */ 289 " stctl 0,15,0x1c0\n" /* store control registers */
291 " lpsw 0(%1)" 290 " oi 0x1c0,0x10\n" /* fake protection bit */
292 : "=m" (ctl_buf) 291 " lpsw 0(%1)"
293 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); 292 : "=m" (ctl_buf)
293 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
294#else /* __s390x__ */ 294#else /* __s390x__ */
295 asm volatile (" stctg 0,0,0(%2)\n" 295 asm volatile(
296 " ni 4(%2),0xef\n" /* switch off protection */ 296 " stctg 0,0,0(%2)\n"
297 " lctlg 0,0,0(%2)\n" 297 " ni 4(%2),0xef\n" /* switch off protection */
298 " lghi 1,0x1000\n" 298 " lctlg 0,0,0(%2)\n"
299 " stpt 0x328(1)\n" /* store timer */ 299 " lghi 1,0x1000\n"
300 " stckc 0x330(1)\n" /* store clock comparator */ 300 " stpt 0x328(1)\n" /* store timer */
301 " stpx 0x318(1)\n" /* store prefix register */ 301 " stckc 0x330(1)\n" /* store clock comparator */
302 " stam 0,15,0x340(1)\n" /* store access registers */ 302 " stpx 0x318(1)\n" /* store prefix register */
303 " stfpc 0x31c(1)\n" /* store fpu control */ 303 " stam 0,15,0x340(1)\n"/* store access registers */
304 " std 0,0x200(1)\n" /* store f0 */ 304 " stfpc 0x31c(1)\n" /* store fpu control */
305 " std 1,0x208(1)\n" /* store f1 */ 305 " std 0,0x200(1)\n" /* store f0 */
306 " std 2,0x210(1)\n" /* store f2 */ 306 " std 1,0x208(1)\n" /* store f1 */
307 " std 3,0x218(1)\n" /* store f3 */ 307 " std 2,0x210(1)\n" /* store f2 */
308 " std 4,0x220(1)\n" /* store f4 */ 308 " std 3,0x218(1)\n" /* store f3 */
309 " std 5,0x228(1)\n" /* store f5 */ 309 " std 4,0x220(1)\n" /* store f4 */
310 " std 6,0x230(1)\n" /* store f6 */ 310 " std 5,0x228(1)\n" /* store f5 */
311 " std 7,0x238(1)\n" /* store f7 */ 311 " std 6,0x230(1)\n" /* store f6 */
312 " std 8,0x240(1)\n" /* store f8 */ 312 " std 7,0x238(1)\n" /* store f7 */
313 " std 9,0x248(1)\n" /* store f9 */ 313 " std 8,0x240(1)\n" /* store f8 */
314 " std 10,0x250(1)\n" /* store f10 */ 314 " std 9,0x248(1)\n" /* store f9 */
315 " std 11,0x258(1)\n" /* store f11 */ 315 " std 10,0x250(1)\n" /* store f10 */
316 " std 12,0x260(1)\n" /* store f12 */ 316 " std 11,0x258(1)\n" /* store f11 */
317 " std 13,0x268(1)\n" /* store f13 */ 317 " std 12,0x260(1)\n" /* store f12 */
318 " std 14,0x270(1)\n" /* store f14 */ 318 " std 13,0x268(1)\n" /* store f13 */
319 " std 15,0x278(1)\n" /* store f15 */ 319 " std 14,0x270(1)\n" /* store f14 */
320 " stmg 0,15,0x280(1)\n" /* store general registers */ 320 " std 15,0x278(1)\n" /* store f15 */
321 " stctg 0,15,0x380(1)\n" /* store control registers */ 321 " stmg 0,15,0x280(1)\n"/* store general registers */
322 " oi 0x384(1),0x10\n" /* fake protection bit */ 322 " stctg 0,15,0x380(1)\n"/* store control registers */
323 " lpswe 0(%1)" 323 " oi 0x384(1),0x10\n"/* fake protection bit */
324 : "=m" (ctl_buf) 324 " lpswe 0(%1)"
325 : "a" (&dw_psw), "a" (&ctl_buf), 325 : "=m" (ctl_buf)
326 "m" (dw_psw) : "cc", "0", "1"); 326 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0");
327#endif /* __s390x__ */ 327#endif /* __s390x__ */
328} 328}
329 329
@@ -337,6 +337,25 @@ struct notifier_block;
337int register_idle_notifier(struct notifier_block *nb); 337int register_idle_notifier(struct notifier_block *nb);
338int unregister_idle_notifier(struct notifier_block *nb); 338int unregister_idle_notifier(struct notifier_block *nb);
339 339
340#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
341
342#endif
343
344/*
345 * Helper macro for exception table entries
346 */
347#ifndef __s390x__
348#define EX_TABLE(_fault,_target) \
349 ".section __ex_table,\"a\"\n" \
350 " .align 4\n" \
351 " .long " #_fault "," #_target "\n" \
352 ".previous\n"
353#else
354#define EX_TABLE(_fault,_target) \
355 ".section __ex_table,\"a\"\n" \
356 " .align 8\n" \
357 " .quad " #_fault "," #_target "\n" \
358 ".previous\n"
340#endif 359#endif
341 360
342#endif /* __ASM_S390_PROCESSOR_H */ 361#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 4d75d77b0f99..8d2bf65b0b64 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -479,7 +479,7 @@ extern void show_regs(struct pt_regs * regs);
479static inline void 479static inline void
480psw_set_key(unsigned int key) 480psw_set_key(unsigned int key)
481{ 481{
482 asm volatile ( "spka 0(%0)" : : "d" (key) ); 482 asm volatile("spka 0(%0)" : : "d" (key));
483} 483}
484 484
485#endif /* __ASSEMBLY__ */ 485#endif /* __ASSEMBLY__ */
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
index 13ec16965150..90f4eccaa290 100644
--- a/include/asm-s390/rwsem.h
+++ b/include/asm-s390/rwsem.h
@@ -122,23 +122,23 @@ static inline void __down_read(struct rw_semaphore *sem)
122{ 122{
123 signed long old, new; 123 signed long old, new;
124 124
125 __asm__ __volatile__( 125 asm volatile(
126#ifndef __s390x__ 126#ifndef __s390x__
127 " l %0,0(%3)\n" 127 " l %0,0(%3)\n"
128 "0: lr %1,%0\n" 128 "0: lr %1,%0\n"
129 " ahi %1,%5\n" 129 " ahi %1,%5\n"
130 " cs %0,%1,0(%3)\n" 130 " cs %0,%1,0(%3)\n"
131 " jl 0b" 131 " jl 0b"
132#else /* __s390x__ */ 132#else /* __s390x__ */
133 " lg %0,0(%3)\n" 133 " lg %0,0(%3)\n"
134 "0: lgr %1,%0\n" 134 "0: lgr %1,%0\n"
135 " aghi %1,%5\n" 135 " aghi %1,%5\n"
136 " csg %0,%1,0(%3)\n" 136 " csg %0,%1,0(%3)\n"
137 " jl 0b" 137 " jl 0b"
138#endif /* __s390x__ */ 138#endif /* __s390x__ */
139 : "=&d" (old), "=&d" (new), "=m" (sem->count) 139 : "=&d" (old), "=&d" (new), "=m" (sem->count)
140 : "a" (&sem->count), "m" (sem->count), 140 : "a" (&sem->count), "m" (sem->count),
141 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" ); 141 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
142 if (old < 0) 142 if (old < 0)
143 rwsem_down_read_failed(sem); 143 rwsem_down_read_failed(sem);
144} 144}
@@ -150,27 +150,27 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
150{ 150{
151 signed long old, new; 151 signed long old, new;
152 152
153 __asm__ __volatile__( 153 asm volatile(
154#ifndef __s390x__ 154#ifndef __s390x__
155 " l %0,0(%3)\n" 155 " l %0,0(%3)\n"
156 "0: ltr %1,%0\n" 156 "0: ltr %1,%0\n"
157 " jm 1f\n" 157 " jm 1f\n"
158 " ahi %1,%5\n" 158 " ahi %1,%5\n"
159 " cs %0,%1,0(%3)\n" 159 " cs %0,%1,0(%3)\n"
160 " jl 0b\n" 160 " jl 0b\n"
161 "1:" 161 "1:"
162#else /* __s390x__ */ 162#else /* __s390x__ */
163 " lg %0,0(%3)\n" 163 " lg %0,0(%3)\n"
164 "0: ltgr %1,%0\n" 164 "0: ltgr %1,%0\n"
165 " jm 1f\n" 165 " jm 1f\n"
166 " aghi %1,%5\n" 166 " aghi %1,%5\n"
167 " csg %0,%1,0(%3)\n" 167 " csg %0,%1,0(%3)\n"
168 " jl 0b\n" 168 " jl 0b\n"
169 "1:" 169 "1:"
170#endif /* __s390x__ */ 170#endif /* __s390x__ */
171 : "=&d" (old), "=&d" (new), "=m" (sem->count) 171 : "=&d" (old), "=&d" (new), "=m" (sem->count)
172 : "a" (&sem->count), "m" (sem->count), 172 : "a" (&sem->count), "m" (sem->count),
173 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" ); 173 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
174 return old >= 0 ? 1 : 0; 174 return old >= 0 ? 1 : 0;
175} 175}
176 176
@@ -182,23 +182,23 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
182 signed long old, new, tmp; 182 signed long old, new, tmp;
183 183
184 tmp = RWSEM_ACTIVE_WRITE_BIAS; 184 tmp = RWSEM_ACTIVE_WRITE_BIAS;
185 __asm__ __volatile__( 185 asm volatile(
186#ifndef __s390x__ 186#ifndef __s390x__
187 " l %0,0(%3)\n" 187 " l %0,0(%3)\n"
188 "0: lr %1,%0\n" 188 "0: lr %1,%0\n"
189 " a %1,%5\n" 189 " a %1,%5\n"
190 " cs %0,%1,0(%3)\n" 190 " cs %0,%1,0(%3)\n"
191 " jl 0b" 191 " jl 0b"
192#else /* __s390x__ */ 192#else /* __s390x__ */
193 " lg %0,0(%3)\n" 193 " lg %0,0(%3)\n"
194 "0: lgr %1,%0\n" 194 "0: lgr %1,%0\n"
195 " ag %1,%5\n" 195 " ag %1,%5\n"
196 " csg %0,%1,0(%3)\n" 196 " csg %0,%1,0(%3)\n"
197 " jl 0b" 197 " jl 0b"
198#endif /* __s390x__ */ 198#endif /* __s390x__ */
199 : "=&d" (old), "=&d" (new), "=m" (sem->count) 199 : "=&d" (old), "=&d" (new), "=m" (sem->count)
200 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 200 : "a" (&sem->count), "m" (sem->count), "m" (tmp)
201 : "cc", "memory" ); 201 : "cc", "memory");
202 if (old != 0) 202 if (old != 0)
203 rwsem_down_write_failed(sem); 203 rwsem_down_write_failed(sem);
204} 204}
@@ -215,24 +215,24 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
215{ 215{
216 signed long old; 216 signed long old;
217 217
218 __asm__ __volatile__( 218 asm volatile(
219#ifndef __s390x__ 219#ifndef __s390x__
220 " l %0,0(%2)\n" 220 " l %0,0(%2)\n"
221 "0: ltr %0,%0\n" 221 "0: ltr %0,%0\n"
222 " jnz 1f\n" 222 " jnz 1f\n"
223 " cs %0,%4,0(%2)\n" 223 " cs %0,%4,0(%2)\n"
224 " jl 0b\n" 224 " jl 0b\n"
225#else /* __s390x__ */ 225#else /* __s390x__ */
226 " lg %0,0(%2)\n" 226 " lg %0,0(%2)\n"
227 "0: ltgr %0,%0\n" 227 "0: ltgr %0,%0\n"
228 " jnz 1f\n" 228 " jnz 1f\n"
229 " csg %0,%4,0(%2)\n" 229 " csg %0,%4,0(%2)\n"
230 " jl 0b\n" 230 " jl 0b\n"
231#endif /* __s390x__ */ 231#endif /* __s390x__ */
232 "1:" 232 "1:"
233 : "=&d" (old), "=m" (sem->count) 233 : "=&d" (old), "=m" (sem->count)
234 : "a" (&sem->count), "m" (sem->count), 234 : "a" (&sem->count), "m" (sem->count),
235 "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" ); 235 "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory");
236 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; 236 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
237} 237}
238 238
@@ -243,24 +243,24 @@ static inline void __up_read(struct rw_semaphore *sem)
243{ 243{
244 signed long old, new; 244 signed long old, new;
245 245
246 __asm__ __volatile__( 246 asm volatile(
247#ifndef __s390x__ 247#ifndef __s390x__
248 " l %0,0(%3)\n" 248 " l %0,0(%3)\n"
249 "0: lr %1,%0\n" 249 "0: lr %1,%0\n"
250 " ahi %1,%5\n" 250 " ahi %1,%5\n"
251 " cs %0,%1,0(%3)\n" 251 " cs %0,%1,0(%3)\n"
252 " jl 0b" 252 " jl 0b"
253#else /* __s390x__ */ 253#else /* __s390x__ */
254 " lg %0,0(%3)\n" 254 " lg %0,0(%3)\n"
255 "0: lgr %1,%0\n" 255 "0: lgr %1,%0\n"
256 " aghi %1,%5\n" 256 " aghi %1,%5\n"
257 " csg %0,%1,0(%3)\n" 257 " csg %0,%1,0(%3)\n"
258 " jl 0b" 258 " jl 0b"
259#endif /* __s390x__ */ 259#endif /* __s390x__ */
260 : "=&d" (old), "=&d" (new), "=m" (sem->count) 260 : "=&d" (old), "=&d" (new), "=m" (sem->count)
261 : "a" (&sem->count), "m" (sem->count), 261 : "a" (&sem->count), "m" (sem->count),
262 "i" (-RWSEM_ACTIVE_READ_BIAS) 262 "i" (-RWSEM_ACTIVE_READ_BIAS)
263 : "cc", "memory" ); 263 : "cc", "memory");
264 if (new < 0) 264 if (new < 0)
265 if ((new & RWSEM_ACTIVE_MASK) == 0) 265 if ((new & RWSEM_ACTIVE_MASK) == 0)
266 rwsem_wake(sem); 266 rwsem_wake(sem);
@@ -274,23 +274,23 @@ static inline void __up_write(struct rw_semaphore *sem)
274 signed long old, new, tmp; 274 signed long old, new, tmp;
275 275
276 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 276 tmp = -RWSEM_ACTIVE_WRITE_BIAS;
277 __asm__ __volatile__( 277 asm volatile(
278#ifndef __s390x__ 278#ifndef __s390x__
279 " l %0,0(%3)\n" 279 " l %0,0(%3)\n"
280 "0: lr %1,%0\n" 280 "0: lr %1,%0\n"
281 " a %1,%5\n" 281 " a %1,%5\n"
282 " cs %0,%1,0(%3)\n" 282 " cs %0,%1,0(%3)\n"
283 " jl 0b" 283 " jl 0b"
284#else /* __s390x__ */ 284#else /* __s390x__ */
285 " lg %0,0(%3)\n" 285 " lg %0,0(%3)\n"
286 "0: lgr %1,%0\n" 286 "0: lgr %1,%0\n"
287 " ag %1,%5\n" 287 " ag %1,%5\n"
288 " csg %0,%1,0(%3)\n" 288 " csg %0,%1,0(%3)\n"
289 " jl 0b" 289 " jl 0b"
290#endif /* __s390x__ */ 290#endif /* __s390x__ */
291 : "=&d" (old), "=&d" (new), "=m" (sem->count) 291 : "=&d" (old), "=&d" (new), "=m" (sem->count)
292 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 292 : "a" (&sem->count), "m" (sem->count), "m" (tmp)
293 : "cc", "memory" ); 293 : "cc", "memory");
294 if (new < 0) 294 if (new < 0)
295 if ((new & RWSEM_ACTIVE_MASK) == 0) 295 if ((new & RWSEM_ACTIVE_MASK) == 0)
296 rwsem_wake(sem); 296 rwsem_wake(sem);
@@ -304,23 +304,23 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
304 signed long old, new, tmp; 304 signed long old, new, tmp;
305 305
306 tmp = -RWSEM_WAITING_BIAS; 306 tmp = -RWSEM_WAITING_BIAS;
307 __asm__ __volatile__( 307 asm volatile(
308#ifndef __s390x__ 308#ifndef __s390x__
309 " l %0,0(%3)\n" 309 " l %0,0(%3)\n"
310 "0: lr %1,%0\n" 310 "0: lr %1,%0\n"
311 " a %1,%5\n" 311 " a %1,%5\n"
312 " cs %0,%1,0(%3)\n" 312 " cs %0,%1,0(%3)\n"
313 " jl 0b" 313 " jl 0b"
314#else /* __s390x__ */ 314#else /* __s390x__ */
315 " lg %0,0(%3)\n" 315 " lg %0,0(%3)\n"
316 "0: lgr %1,%0\n" 316 "0: lgr %1,%0\n"
317 " ag %1,%5\n" 317 " ag %1,%5\n"
318 " csg %0,%1,0(%3)\n" 318 " csg %0,%1,0(%3)\n"
319 " jl 0b" 319 " jl 0b"
320#endif /* __s390x__ */ 320#endif /* __s390x__ */
321 : "=&d" (old), "=&d" (new), "=m" (sem->count) 321 : "=&d" (old), "=&d" (new), "=m" (sem->count)
322 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 322 : "a" (&sem->count), "m" (sem->count), "m" (tmp)
323 : "cc", "memory" ); 323 : "cc", "memory");
324 if (new > 1) 324 if (new > 1)
325 rwsem_downgrade_wake(sem); 325 rwsem_downgrade_wake(sem);
326} 326}
@@ -332,23 +332,23 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
332{ 332{
333 signed long old, new; 333 signed long old, new;
334 334
335 __asm__ __volatile__( 335 asm volatile(
336#ifndef __s390x__ 336#ifndef __s390x__
337 " l %0,0(%3)\n" 337 " l %0,0(%3)\n"
338 "0: lr %1,%0\n" 338 "0: lr %1,%0\n"
339 " ar %1,%5\n" 339 " ar %1,%5\n"
340 " cs %0,%1,0(%3)\n" 340 " cs %0,%1,0(%3)\n"
341 " jl 0b" 341 " jl 0b"
342#else /* __s390x__ */ 342#else /* __s390x__ */
343 " lg %0,0(%3)\n" 343 " lg %0,0(%3)\n"
344 "0: lgr %1,%0\n" 344 "0: lgr %1,%0\n"
345 " agr %1,%5\n" 345 " agr %1,%5\n"
346 " csg %0,%1,0(%3)\n" 346 " csg %0,%1,0(%3)\n"
347 " jl 0b" 347 " jl 0b"
348#endif /* __s390x__ */ 348#endif /* __s390x__ */
349 : "=&d" (old), "=&d" (new), "=m" (sem->count) 349 : "=&d" (old), "=&d" (new), "=m" (sem->count)
350 : "a" (&sem->count), "m" (sem->count), "d" (delta) 350 : "a" (&sem->count), "m" (sem->count), "d" (delta)
351 : "cc", "memory" ); 351 : "cc", "memory");
352} 352}
353 353
354/* 354/*
@@ -358,23 +358,23 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
358{ 358{
359 signed long old, new; 359 signed long old, new;
360 360
361 __asm__ __volatile__( 361 asm volatile(
362#ifndef __s390x__ 362#ifndef __s390x__
363 " l %0,0(%3)\n" 363 " l %0,0(%3)\n"
364 "0: lr %1,%0\n" 364 "0: lr %1,%0\n"
365 " ar %1,%5\n" 365 " ar %1,%5\n"
366 " cs %0,%1,0(%3)\n" 366 " cs %0,%1,0(%3)\n"
367 " jl 0b" 367 " jl 0b"
368#else /* __s390x__ */ 368#else /* __s390x__ */
369 " lg %0,0(%3)\n" 369 " lg %0,0(%3)\n"
370 "0: lgr %1,%0\n" 370 "0: lgr %1,%0\n"
371 " agr %1,%5\n" 371 " agr %1,%5\n"
372 " csg %0,%1,0(%3)\n" 372 " csg %0,%1,0(%3)\n"
373 " jl 0b" 373 " jl 0b"
374#endif /* __s390x__ */ 374#endif /* __s390x__ */
375 : "=&d" (old), "=&d" (new), "=m" (sem->count) 375 : "=&d" (old), "=&d" (new), "=m" (sem->count)
376 : "a" (&sem->count), "m" (sem->count), "d" (delta) 376 : "a" (&sem->count), "m" (sem->count), "d" (delta)
377 : "cc", "memory" ); 377 : "cc", "memory");
378 return new; 378 return new;
379} 379}
380 380
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 32cdc69f39f4..dbce058aefa9 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -85,17 +85,17 @@ static inline int down_trylock(struct semaphore * sem)
85 * sem->count.counter = --new_val; 85 * sem->count.counter = --new_val;
86 * In the ppc code this is called atomic_dec_if_positive. 86 * In the ppc code this is called atomic_dec_if_positive.
87 */ 87 */
88 __asm__ __volatile__ ( 88 asm volatile(
89 " l %0,0(%3)\n" 89 " l %0,0(%3)\n"
90 "0: ltr %1,%0\n" 90 "0: ltr %1,%0\n"
91 " jle 1f\n" 91 " jle 1f\n"
92 " ahi %1,-1\n" 92 " ahi %1,-1\n"
93 " cs %0,%1,0(%3)\n" 93 " cs %0,%1,0(%3)\n"
94 " jl 0b\n" 94 " jl 0b\n"
95 "1:" 95 "1:"
96 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter) 96 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
97 : "a" (&sem->count.counter), "m" (sem->count.counter) 97 : "a" (&sem->count.counter), "m" (sem->count.counter)
98 : "cc", "memory" ); 98 : "cc", "memory");
99 return old_val <= 0; 99 return old_val <= 0;
100} 100}
101 101
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 19e31979309a..5d72eda8a11b 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -14,8 +14,6 @@
14 14
15#define PARMAREA 0x10400 15#define PARMAREA 0x10400
16#define COMMAND_LINE_SIZE 896 16#define COMMAND_LINE_SIZE 896
17#define RAMDISK_ORIGIN 0x800000
18#define RAMDISK_SIZE 0x800000
19#define MEMORY_CHUNKS 16 /* max 0x7fff */ 17#define MEMORY_CHUNKS 16 /* max 0x7fff */
20#define IPL_PARMBLOCK_ORIGIN 0x2000 18#define IPL_PARMBLOCK_ORIGIN 0x2000
21 19
@@ -41,15 +39,18 @@ extern unsigned long machine_flags;
41#define MACHINE_IS_P390 (machine_flags & 4) 39#define MACHINE_IS_P390 (machine_flags & 4)
42#define MACHINE_HAS_MVPG (machine_flags & 16) 40#define MACHINE_HAS_MVPG (machine_flags & 16)
43#define MACHINE_HAS_IDTE (machine_flags & 128) 41#define MACHINE_HAS_IDTE (machine_flags & 128)
42#define MACHINE_HAS_DIAG9C (machine_flags & 256)
44 43
45#ifndef __s390x__ 44#ifndef __s390x__
46#define MACHINE_HAS_IEEE (machine_flags & 2) 45#define MACHINE_HAS_IEEE (machine_flags & 2)
47#define MACHINE_HAS_CSP (machine_flags & 8) 46#define MACHINE_HAS_CSP (machine_flags & 8)
48#define MACHINE_HAS_DIAG44 (1) 47#define MACHINE_HAS_DIAG44 (1)
48#define MACHINE_HAS_MVCOS (0)
49#else /* __s390x__ */ 49#else /* __s390x__ */
50#define MACHINE_HAS_IEEE (1) 50#define MACHINE_HAS_IEEE (1)
51#define MACHINE_HAS_CSP (1) 51#define MACHINE_HAS_CSP (1)
52#define MACHINE_HAS_DIAG44 (machine_flags & 32) 52#define MACHINE_HAS_DIAG44 (machine_flags & 32)
53#define MACHINE_HAS_MVCOS (machine_flags & 512)
53#endif /* __s390x__ */ 54#endif /* __s390x__ */
54 55
55 56
@@ -70,52 +71,76 @@ extern unsigned int console_irq;
70#define SET_CONSOLE_3215 do { console_mode = 2; } while (0) 71#define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
71#define SET_CONSOLE_3270 do { console_mode = 3; } while (0) 72#define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
72 73
73struct ipl_list_header { 74
74 u32 length; 75struct ipl_list_hdr {
75 u8 reserved[3]; 76 u32 len;
77 u8 reserved1[3];
76 u8 version; 78 u8 version;
79 u32 blk0_len;
80 u8 pbt;
81 u8 flags;
82 u16 reserved2;
77} __attribute__((packed)); 83} __attribute__((packed));
78 84
79struct ipl_block_fcp { 85struct ipl_block_fcp {
80 u32 length; 86 u8 reserved1[313-1];
81 u8 pbt; 87 u8 opt;
82 u8 reserved1[322-1]; 88 u8 reserved2[3];
89 u16 reserved3;
83 u16 devno; 90 u16 devno;
84 u8 reserved2[4]; 91 u8 reserved4[4];
85 u64 wwpn; 92 u64 wwpn;
86 u64 lun; 93 u64 lun;
87 u32 bootprog; 94 u32 bootprog;
88 u8 reserved3[12]; 95 u8 reserved5[12];
89 u64 br_lba; 96 u64 br_lba;
90 u32 scp_data_len; 97 u32 scp_data_len;
91 u8 reserved4[260]; 98 u8 reserved6[260];
92 u8 scp_data[]; 99 u8 scp_data[];
93} __attribute__((packed)); 100} __attribute__((packed));
94 101
102struct ipl_block_ccw {
103 u8 load_param[8];
104 u8 reserved1[84];
105 u8 reserved2[2];
106 u16 devno;
107 u8 vm_flags;
108 u8 reserved3[3];
109 u32 vm_parm_len;
110} __attribute__((packed));
111
95struct ipl_parameter_block { 112struct ipl_parameter_block {
113 struct ipl_list_hdr hdr;
96 union { 114 union {
97 u32 length; 115 struct ipl_block_fcp fcp;
98 struct ipl_list_header header; 116 struct ipl_block_ccw ccw;
99 } hdr; 117 } ipl_info;
100 struct ipl_block_fcp fcp;
101} __attribute__((packed)); 118} __attribute__((packed));
102 119
103#define IPL_MAX_SUPPORTED_VERSION (0) 120#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
121 sizeof(struct ipl_block_fcp))
104 122
105#define IPL_TYPE_FCP (0) 123#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
124 sizeof(struct ipl_block_ccw))
125
126#define IPL_MAX_SUPPORTED_VERSION (0)
106 127
107/* 128/*
108 * IPL validity flags and parameters as detected in head.S 129 * IPL validity flags and parameters as detected in head.S
109 */ 130 */
110extern u32 ipl_parameter_flags; 131extern u32 ipl_flags;
111extern u16 ipl_devno; 132extern u16 ipl_devno;
112 133
113#define IPL_DEVNO_VALID (ipl_parameter_flags & 1) 134void do_reipl(void);
114#define IPL_PARMBLOCK_VALID (ipl_parameter_flags & 2) 135
136enum {
137 IPL_DEVNO_VALID = 1,
138 IPL_PARMBLOCK_VALID = 2,
139};
115 140
116#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ 141#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
117 IPL_PARMBLOCK_ORIGIN) 142 IPL_PARMBLOCK_ORIGIN)
118#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.length) 143#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len)
119 144
120#else /* __ASSEMBLY__ */ 145#else /* __ASSEMBLY__ */
121 146
diff --git a/include/asm-s390/sfp-machine.h b/include/asm-s390/sfp-machine.h
index de69dfa46fbb..8ca8c77b2d04 100644
--- a/include/asm-s390/sfp-machine.h
+++ b/include/asm-s390/sfp-machine.h
@@ -76,21 +76,23 @@
76 unsigned int __r2 = (x2) + (y2); \ 76 unsigned int __r2 = (x2) + (y2); \
77 unsigned int __r1 = (x1); \ 77 unsigned int __r1 = (x1); \
78 unsigned int __r0 = (x0); \ 78 unsigned int __r0 = (x0); \
79 __asm__ (" alr %2,%3\n" \ 79 asm volatile( \
80 " brc 12,0f\n" \ 80 " alr %2,%3\n" \
81 " lhi 0,1\n" \ 81 " brc 12,0f\n" \
82 " alr %1,0\n" \ 82 " lhi 0,1\n" \
83 " brc 12,0f\n" \ 83 " alr %1,0\n" \
84 " alr %0,0\n" \ 84 " brc 12,0f\n" \
85 "0:" \ 85 " alr %0,0\n" \
86 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ 86 "0:" \
87 : "d" (y0), "i" (1) : "cc", "0" ); \ 87 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
88 __asm__ (" alr %1,%2\n" \ 88 : "d" (y0), "i" (1) : "cc", "0" ); \
89 " brc 12,0f\n" \ 89 asm volatile( \
90 " ahi %0,1\n" \ 90 " alr %1,%2\n" \
91 "0:" \ 91 " brc 12,0f\n" \
92 : "+&d" (__r2), "+&d" (__r1) \ 92 " ahi %0,1\n" \
93 : "d" (y1) : "cc" ); \ 93 "0:" \
94 : "+&d" (__r2), "+&d" (__r1) \
95 : "d" (y1) : "cc"); \
94 (r2) = __r2; \ 96 (r2) = __r2; \
95 (r1) = __r1; \ 97 (r1) = __r1; \
96 (r0) = __r0; \ 98 (r0) = __r0; \
@@ -100,21 +102,23 @@
100 unsigned int __r2 = (x2) - (y2); \ 102 unsigned int __r2 = (x2) - (y2); \
101 unsigned int __r1 = (x1); \ 103 unsigned int __r1 = (x1); \
102 unsigned int __r0 = (x0); \ 104 unsigned int __r0 = (x0); \
103 __asm__ (" slr %2,%3\n" \ 105 asm volatile( \
104 " brc 3,0f\n" \ 106 " slr %2,%3\n" \
105 " lhi 0,1\n" \ 107 " brc 3,0f\n" \
106 " slr %1,0\n" \ 108 " lhi 0,1\n" \
107 " brc 3,0f\n" \ 109 " slr %1,0\n" \
108 " slr %0,0\n" \ 110 " brc 3,0f\n" \
109 "0:" \ 111 " slr %0,0\n" \
110 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ 112 "0:" \
111 : "d" (y0) : "cc", "0" ); \ 113 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
112 __asm__ (" slr %1,%2\n" \ 114 : "d" (y0) : "cc", "0"); \
113 " brc 3,0f\n" \ 115 asm volatile( \
114 " ahi %0,-1\n" \ 116 " slr %1,%2\n" \
115 "0:" \ 117 " brc 3,0f\n" \
116 : "+&d" (__r2), "+&d" (__r1) \ 118 " ahi %0,-1\n" \
117 : "d" (y1) : "cc" ); \ 119 "0:" \
120 : "+&d" (__r2), "+&d" (__r1) \
121 : "d" (y1) : "cc"); \
118 (r2) = __r2; \ 122 (r2) = __r2; \
119 (r1) = __r1; \ 123 (r1) = __r1; \
120 (r0) = __r0; \ 124 (r0) = __r0; \
diff --git a/include/asm-s390/sigp.h b/include/asm-s390/sigp.h
index fc56458aff66..e16d56f8dfe1 100644
--- a/include/asm-s390/sigp.h
+++ b/include/asm-s390/sigp.h
@@ -70,16 +70,16 @@ typedef enum
70static inline sigp_ccode 70static inline sigp_ccode
71signal_processor(__u16 cpu_addr, sigp_order_code order_code) 71signal_processor(__u16 cpu_addr, sigp_order_code order_code)
72{ 72{
73 register unsigned long reg1 asm ("1") = 0;
73 sigp_ccode ccode; 74 sigp_ccode ccode;
74 75
75 __asm__ __volatile__( 76 asm volatile(
76 " sr 1,1\n" /* parameter=0 in gpr 1 */ 77 " sigp %1,%2,0(%3)\n"
77 " sigp 1,%1,0(%2)\n" 78 " ipm %0\n"
78 " ipm %0\n" 79 " srl %0,28\n"
79 " srl %0,28\n" 80 : "=d" (ccode)
80 : "=d" (ccode) 81 : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
81 : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code) 82 "a" (order_code) : "cc" , "memory");
82 : "cc" , "memory", "1" );
83 return ccode; 83 return ccode;
84} 84}
85 85
@@ -87,20 +87,18 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
87 * Signal processor with parameter 87 * Signal processor with parameter
88 */ 88 */
89static inline sigp_ccode 89static inline sigp_ccode
90signal_processor_p(__u32 parameter, __u16 cpu_addr, 90signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code)
91 sigp_order_code order_code)
92{ 91{
92 register unsigned int reg1 asm ("1") = parameter;
93 sigp_ccode ccode; 93 sigp_ccode ccode;
94 94
95 __asm__ __volatile__( 95 asm volatile(
96 " lr 1,%1\n" /* parameter in gpr 1 */ 96 " sigp %1,%2,0(%3)\n"
97 " sigp 1,%2,0(%3)\n" 97 " ipm %0\n"
98 " ipm %0\n" 98 " srl %0,28\n"
99 " srl %0,28\n"
100 : "=d" (ccode) 99 : "=d" (ccode)
101 : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]), 100 : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
102 "a" (order_code) 101 "a" (order_code) : "cc" , "memory");
103 : "cc" , "memory", "1" );
104 return ccode; 102 return ccode;
105} 103}
106 104
@@ -108,24 +106,21 @@ signal_processor_p(__u32 parameter, __u16 cpu_addr,
108 * Signal processor with parameter and return status 106 * Signal processor with parameter and return status
109 */ 107 */
110static inline sigp_ccode 108static inline sigp_ccode
111signal_processor_ps(__u32 *statusptr, __u32 parameter, 109signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr,
112 __u16 cpu_addr, sigp_order_code order_code) 110 sigp_order_code order_code)
113{ 111{
112 register unsigned int reg1 asm ("1") = parameter;
114 sigp_ccode ccode; 113 sigp_ccode ccode;
115 114
116 __asm__ __volatile__( 115 asm volatile(
117 " sr 2,2\n" /* clear status */ 116 " sigp %1,%2,0(%3)\n"
118 " lr 3,%2\n" /* parameter in gpr 3 */ 117 " ipm %0\n"
119 " sigp 2,%3,0(%4)\n" 118 " srl %0,28\n"
120 " st 2,%1\n" 119 : "=d" (ccode), "+d" (reg1)
121 " ipm %0\n" 120 : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
122 " srl %0,28\n" 121 : "cc" , "memory");
123 : "=d" (ccode), "=m" (*statusptr) 122 *statusptr = reg1;
124 : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]), 123 return ccode;
125 "a" (order_code)
126 : "cc" , "memory", "2" , "3"
127 );
128 return ccode;
129} 124}
130 125
131#endif /* __SIGP__ */ 126#endif /* __SIGP__ */
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 657646054c5e..c3cf030ada4d 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -56,7 +56,7 @@ static inline __u16 hard_smp_processor_id(void)
56{ 56{
57 __u16 cpu_address; 57 __u16 cpu_address;
58 58
59 __asm__ ("stap %0\n" : "=m" (cpu_address)); 59 asm volatile("stap %0" : "=m" (cpu_address));
60 return cpu_address; 60 return cpu_address;
61} 61}
62 62
@@ -104,7 +104,7 @@ smp_call_function_on(void (*func) (void *info), void *info,
104#define smp_cpu_not_running(cpu) 1 104#define smp_cpu_not_running(cpu) 1
105#define smp_get_cpu(cpu) ({ 0; }) 105#define smp_get_cpu(cpu) ({ 0; })
106#define smp_put_cpu(cpu) ({ 0; }) 106#define smp_put_cpu(cpu) ({ 0; })
107#define smp_setup_cpu_possible_map() 107#define smp_setup_cpu_possible_map() do { } while (0)
108#endif 108#endif
109 109
110#endif 110#endif
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 273dbecf8ace..6b78af16999b 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -11,17 +11,38 @@
11#ifndef __ASM_SPINLOCK_H 11#ifndef __ASM_SPINLOCK_H
12#define __ASM_SPINLOCK_H 12#define __ASM_SPINLOCK_H
13 13
14#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
15
16#include <linux/smp.h>
17
14static inline int 18static inline int
15_raw_compare_and_swap(volatile unsigned int *lock, 19_raw_compare_and_swap(volatile unsigned int *lock,
16 unsigned int old, unsigned int new) 20 unsigned int old, unsigned int new)
17{ 21{
18 asm volatile ("cs %0,%3,0(%4)" 22 asm volatile(
19 : "=d" (old), "=m" (*lock) 23 " cs %0,%3,%1"
20 : "0" (old), "d" (new), "a" (lock), "m" (*lock) 24 : "=d" (old), "=Q" (*lock)
21 : "cc", "memory" ); 25 : "0" (old), "d" (new), "Q" (*lock)
26 : "cc", "memory" );
22 return old; 27 return old;
23} 28}
24 29
30#else /* __GNUC__ */
31
32static inline int
33_raw_compare_and_swap(volatile unsigned int *lock,
34 unsigned int old, unsigned int new)
35{
36 asm volatile(
37 " cs %0,%3,0(%4)"
38 : "=d" (old), "=m" (*lock)
39 : "0" (old), "d" (new), "a" (lock), "m" (*lock)
40 : "cc", "memory" );
41 return old;
42}
43
44#endif /* __GNUC__ */
45
25/* 46/*
26 * Simple spin lock operations. There are two variants, one clears IRQ's 47 * Simple spin lock operations. There are two variants, one clears IRQ's
27 * on the local processor, one does not. 48 * on the local processor, one does not.
@@ -31,34 +52,46 @@ _raw_compare_and_swap(volatile unsigned int *lock,
31 * (the type definitions are in asm/spinlock_types.h) 52 * (the type definitions are in asm/spinlock_types.h)
32 */ 53 */
33 54
34#define __raw_spin_is_locked(x) ((x)->lock != 0) 55#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
35#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 56#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
36#define __raw_spin_unlock_wait(lock) \ 57#define __raw_spin_unlock_wait(lock) \
37 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 58 do { while (__raw_spin_is_locked(lock)) \
59 _raw_spin_relax(lock); } while (0)
38 60
39extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc); 61extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
40extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc); 62extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
63extern void _raw_spin_relax(raw_spinlock_t *lock);
41 64
42static inline void __raw_spin_lock(raw_spinlock_t *lp) 65static inline void __raw_spin_lock(raw_spinlock_t *lp)
43{ 66{
44 unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); 67 unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
45 68 int old;
46 if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0)) 69
47 _raw_spin_lock_wait(lp, pc); 70 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
71 if (likely(old == 0)) {
72 lp->owner_pc = pc;
73 return;
74 }
75 _raw_spin_lock_wait(lp, pc);
48} 76}
49 77
50static inline int __raw_spin_trylock(raw_spinlock_t *lp) 78static inline int __raw_spin_trylock(raw_spinlock_t *lp)
51{ 79{
52 unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); 80 unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
81 int old;
53 82
54 if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0)) 83 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
84 if (likely(old == 0)) {
85 lp->owner_pc = pc;
55 return 1; 86 return 1;
87 }
56 return _raw_spin_trylock_retry(lp, pc); 88 return _raw_spin_trylock_retry(lp, pc);
57} 89}
58 90
59static inline void __raw_spin_unlock(raw_spinlock_t *lp) 91static inline void __raw_spin_unlock(raw_spinlock_t *lp)
60{ 92{
61 _raw_compare_and_swap(&lp->lock, lp->lock, 0); 93 lp->owner_pc = 0;
94 _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
62} 95}
63 96
64/* 97/*
@@ -135,4 +168,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
135 return _raw_write_trylock_retry(rw); 168 return _raw_write_trylock_retry(rw);
136} 169}
137 170
171#define _raw_read_relax(lock) cpu_relax()
172#define _raw_write_relax(lock) cpu_relax()
173
138#endif /* __ASM_SPINLOCK_H */ 174#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h
index f79a2216204f..b7ac13f7aa37 100644
--- a/include/asm-s390/spinlock_types.h
+++ b/include/asm-s390/spinlock_types.h
@@ -6,16 +6,16 @@
6#endif 6#endif
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int owner_cpu;
10 volatile unsigned int owner_pc;
10} __attribute__ ((aligned (4))) raw_spinlock_t; 11} __attribute__ ((aligned (4))) raw_spinlock_t;
11 12
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 13#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
13 14
14typedef struct { 15typedef struct {
15 volatile unsigned int lock; 16 volatile unsigned int lock;
16 volatile unsigned int owner_pc;
17} raw_rwlock_t; 17} raw_rwlock_t;
18 18
19#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } 19#define __RAW_RW_LOCK_UNLOCKED { 0 }
20 20
21#endif 21#endif
diff --git a/include/asm-s390/string.h b/include/asm-s390/string.h
index 23a4c390489f..d074673a6d9b 100644
--- a/include/asm-s390/string.h
+++ b/include/asm-s390/string.h
@@ -60,12 +60,13 @@ static inline void *memchr(const void * s, int c, size_t n)
60 register int r0 asm("0") = (char) c; 60 register int r0 asm("0") = (char) c;
61 const void *ret = s + n; 61 const void *ret = s + n;
62 62
63 asm volatile ("0: srst %0,%1\n" 63 asm volatile(
64 " jo 0b\n" 64 "0: srst %0,%1\n"
65 " jl 1f\n" 65 " jo 0b\n"
66 " la %0,0\n" 66 " jl 1f\n"
67 "1:" 67 " la %0,0\n"
68 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 68 "1:"
69 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
69 return (void *) ret; 70 return (void *) ret;
70} 71}
71 72
@@ -74,9 +75,10 @@ static inline void *memscan(void *s, int c, size_t n)
74 register int r0 asm("0") = (char) c; 75 register int r0 asm("0") = (char) c;
75 const void *ret = s + n; 76 const void *ret = s + n;
76 77
77 asm volatile ("0: srst %0,%1\n" 78 asm volatile(
78 " jo 0b\n" 79 "0: srst %0,%1\n"
79 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 80 " jo 0b\n"
81 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
80 return (void *) ret; 82 return (void *) ret;
81} 83}
82 84
@@ -86,12 +88,13 @@ static inline char *strcat(char *dst, const char *src)
86 unsigned long dummy; 88 unsigned long dummy;
87 char *ret = dst; 89 char *ret = dst;
88 90
89 asm volatile ("0: srst %0,%1\n" 91 asm volatile(
90 " jo 0b\n" 92 "0: srst %0,%1\n"
91 "1: mvst %0,%2\n" 93 " jo 0b\n"
92 " jo 1b" 94 "1: mvst %0,%2\n"
93 : "=&a" (dummy), "+a" (dst), "+a" (src) 95 " jo 1b"
94 : "d" (r0), "0" (0) : "cc", "memory" ); 96 : "=&a" (dummy), "+a" (dst), "+a" (src)
97 : "d" (r0), "0" (0) : "cc", "memory" );
95 return ret; 98 return ret;
96} 99}
97 100
@@ -100,10 +103,11 @@ static inline char *strcpy(char *dst, const char *src)
100 register int r0 asm("0") = 0; 103 register int r0 asm("0") = 0;
101 char *ret = dst; 104 char *ret = dst;
102 105
103 asm volatile ("0: mvst %0,%1\n" 106 asm volatile(
104 " jo 0b" 107 "0: mvst %0,%1\n"
105 : "+&a" (dst), "+&a" (src) : "d" (r0) 108 " jo 0b"
106 : "cc", "memory" ); 109 : "+&a" (dst), "+&a" (src) : "d" (r0)
110 : "cc", "memory");
107 return ret; 111 return ret;
108} 112}
109 113
@@ -112,9 +116,10 @@ static inline size_t strlen(const char *s)
112 register unsigned long r0 asm("0") = 0; 116 register unsigned long r0 asm("0") = 0;
113 const char *tmp = s; 117 const char *tmp = s;
114 118
115 asm volatile ("0: srst %0,%1\n" 119 asm volatile(
116 " jo 0b" 120 "0: srst %0,%1\n"
117 : "+d" (r0), "+a" (tmp) : : "cc" ); 121 " jo 0b"
122 : "+d" (r0), "+a" (tmp) : : "cc");
118 return r0 - (unsigned long) s; 123 return r0 - (unsigned long) s;
119} 124}
120 125
@@ -124,9 +129,10 @@ static inline size_t strnlen(const char * s, size_t n)
124 const char *tmp = s; 129 const char *tmp = s;
125 const char *end = s + n; 130 const char *end = s + n;
126 131
127 asm volatile ("0: srst %0,%1\n" 132 asm volatile(
128 " jo 0b" 133 "0: srst %0,%1\n"
129 : "+a" (end), "+a" (tmp) : "d" (r0) : "cc" ); 134 " jo 0b"
135 : "+a" (end), "+a" (tmp) : "d" (r0) : "cc");
130 return end - s; 136 return end - s;
131} 137}
132 138
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 16040048cd1b..ccbafe4bf2cb 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -23,74 +23,68 @@ struct task_struct;
23 23
24extern struct task_struct *__switch_to(void *, void *); 24extern struct task_struct *__switch_to(void *, void *);
25 25
26#ifdef __s390x__
27#define __FLAG_SHIFT 56
28#else /* ! __s390x__ */
29#define __FLAG_SHIFT 24
30#endif /* ! __s390x__ */
31
32static inline void save_fp_regs(s390_fp_regs *fpregs) 26static inline void save_fp_regs(s390_fp_regs *fpregs)
33{ 27{
34 asm volatile ( 28 asm volatile(
35 " std 0,8(%1)\n" 29 " std 0,8(%1)\n"
36 " std 2,24(%1)\n" 30 " std 2,24(%1)\n"
37 " std 4,40(%1)\n" 31 " std 4,40(%1)\n"
38 " std 6,56(%1)" 32 " std 6,56(%1)"
39 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" ); 33 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
40 if (!MACHINE_HAS_IEEE) 34 if (!MACHINE_HAS_IEEE)
41 return; 35 return;
42 asm volatile( 36 asm volatile(
43 " stfpc 0(%1)\n" 37 " stfpc 0(%1)\n"
44 " std 1,16(%1)\n" 38 " std 1,16(%1)\n"
45 " std 3,32(%1)\n" 39 " std 3,32(%1)\n"
46 " std 5,48(%1)\n" 40 " std 5,48(%1)\n"
47 " std 7,64(%1)\n" 41 " std 7,64(%1)\n"
48 " std 8,72(%1)\n" 42 " std 8,72(%1)\n"
49 " std 9,80(%1)\n" 43 " std 9,80(%1)\n"
50 " std 10,88(%1)\n" 44 " std 10,88(%1)\n"
51 " std 11,96(%1)\n" 45 " std 11,96(%1)\n"
52 " std 12,104(%1)\n" 46 " std 12,104(%1)\n"
53 " std 13,112(%1)\n" 47 " std 13,112(%1)\n"
54 " std 14,120(%1)\n" 48 " std 14,120(%1)\n"
55 " std 15,128(%1)\n" 49 " std 15,128(%1)\n"
56 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" ); 50 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
57} 51}
58 52
59static inline void restore_fp_regs(s390_fp_regs *fpregs) 53static inline void restore_fp_regs(s390_fp_regs *fpregs)
60{ 54{
61 asm volatile ( 55 asm volatile(
62 " ld 0,8(%0)\n" 56 " ld 0,8(%0)\n"
63 " ld 2,24(%0)\n" 57 " ld 2,24(%0)\n"
64 " ld 4,40(%0)\n" 58 " ld 4,40(%0)\n"
65 " ld 6,56(%0)" 59 " ld 6,56(%0)"
66 : : "a" (fpregs), "m" (*fpregs) ); 60 : : "a" (fpregs), "m" (*fpregs));
67 if (!MACHINE_HAS_IEEE) 61 if (!MACHINE_HAS_IEEE)
68 return; 62 return;
69 asm volatile( 63 asm volatile(
70 " lfpc 0(%0)\n" 64 " lfpc 0(%0)\n"
71 " ld 1,16(%0)\n" 65 " ld 1,16(%0)\n"
72 " ld 3,32(%0)\n" 66 " ld 3,32(%0)\n"
73 " ld 5,48(%0)\n" 67 " ld 5,48(%0)\n"
74 " ld 7,64(%0)\n" 68 " ld 7,64(%0)\n"
75 " ld 8,72(%0)\n" 69 " ld 8,72(%0)\n"
76 " ld 9,80(%0)\n" 70 " ld 9,80(%0)\n"
77 " ld 10,88(%0)\n" 71 " ld 10,88(%0)\n"
78 " ld 11,96(%0)\n" 72 " ld 11,96(%0)\n"
79 " ld 12,104(%0)\n" 73 " ld 12,104(%0)\n"
80 " ld 13,112(%0)\n" 74 " ld 13,112(%0)\n"
81 " ld 14,120(%0)\n" 75 " ld 14,120(%0)\n"
82 " ld 15,128(%0)\n" 76 " ld 15,128(%0)\n"
83 : : "a" (fpregs), "m" (*fpregs) ); 77 : : "a" (fpregs), "m" (*fpregs));
84} 78}
85 79
86static inline void save_access_regs(unsigned int *acrs) 80static inline void save_access_regs(unsigned int *acrs)
87{ 81{
88 asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" ); 82 asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory");
89} 83}
90 84
91static inline void restore_access_regs(unsigned int *acrs) 85static inline void restore_access_regs(unsigned int *acrs)
92{ 86{
93 asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) ); 87 asm volatile("lam 0,15,0(%0)" : : "a" (acrs));
94} 88}
95 89
96#define switch_to(prev,next,last) do { \ 90#define switch_to(prev,next,last) do { \
@@ -126,7 +120,7 @@ extern void account_system_vtime(struct task_struct *);
126 account_vtime(prev); \ 120 account_vtime(prev); \
127} while (0) 121} while (0)
128 122
129#define nop() __asm__ __volatile__ ("nop") 123#define nop() asm volatile("nop")
130 124
131#define xchg(ptr,x) \ 125#define xchg(ptr,x) \
132({ \ 126({ \
@@ -147,15 +141,15 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
147 shift = (3 ^ (addr & 3)) << 3; 141 shift = (3 ^ (addr & 3)) << 3;
148 addr ^= addr & 3; 142 addr ^= addr & 3;
149 asm volatile( 143 asm volatile(
150 " l %0,0(%4)\n" 144 " l %0,0(%4)\n"
151 "0: lr 0,%0\n" 145 "0: lr 0,%0\n"
152 " nr 0,%3\n" 146 " nr 0,%3\n"
153 " or 0,%2\n" 147 " or 0,%2\n"
154 " cs %0,0,0(%4)\n" 148 " cs %0,0,0(%4)\n"
155 " jl 0b\n" 149 " jl 0b\n"
156 : "=&d" (old), "=m" (*(int *) addr) 150 : "=&d" (old), "=m" (*(int *) addr)
157 : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), 151 : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
158 "m" (*(int *) addr) : "memory", "cc", "0" ); 152 "m" (*(int *) addr) : "memory", "cc", "0");
159 x = old >> shift; 153 x = old >> shift;
160 break; 154 break;
161 case 2: 155 case 2:
@@ -163,36 +157,36 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
163 shift = (2 ^ (addr & 2)) << 3; 157 shift = (2 ^ (addr & 2)) << 3;
164 addr ^= addr & 2; 158 addr ^= addr & 2;
165 asm volatile( 159 asm volatile(
166 " l %0,0(%4)\n" 160 " l %0,0(%4)\n"
167 "0: lr 0,%0\n" 161 "0: lr 0,%0\n"
168 " nr 0,%3\n" 162 " nr 0,%3\n"
169 " or 0,%2\n" 163 " or 0,%2\n"
170 " cs %0,0,0(%4)\n" 164 " cs %0,0,0(%4)\n"
171 " jl 0b\n" 165 " jl 0b\n"
172 : "=&d" (old), "=m" (*(int *) addr) 166 : "=&d" (old), "=m" (*(int *) addr)
173 : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), 167 : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
174 "m" (*(int *) addr) : "memory", "cc", "0" ); 168 "m" (*(int *) addr) : "memory", "cc", "0");
175 x = old >> shift; 169 x = old >> shift;
176 break; 170 break;
177 case 4: 171 case 4:
178 asm volatile ( 172 asm volatile(
179 " l %0,0(%3)\n" 173 " l %0,0(%3)\n"
180 "0: cs %0,%2,0(%3)\n" 174 "0: cs %0,%2,0(%3)\n"
181 " jl 0b\n" 175 " jl 0b\n"
182 : "=&d" (old), "=m" (*(int *) ptr) 176 : "=&d" (old), "=m" (*(int *) ptr)
183 : "d" (x), "a" (ptr), "m" (*(int *) ptr) 177 : "d" (x), "a" (ptr), "m" (*(int *) ptr)
184 : "memory", "cc" ); 178 : "memory", "cc");
185 x = old; 179 x = old;
186 break; 180 break;
187#ifdef __s390x__ 181#ifdef __s390x__
188 case 8: 182 case 8:
189 asm volatile ( 183 asm volatile(
190 " lg %0,0(%3)\n" 184 " lg %0,0(%3)\n"
191 "0: csg %0,%2,0(%3)\n" 185 "0: csg %0,%2,0(%3)\n"
192 " jl 0b\n" 186 " jl 0b\n"
193 : "=&d" (old), "=m" (*(long *) ptr) 187 : "=&d" (old), "=m" (*(long *) ptr)
194 : "d" (x), "a" (ptr), "m" (*(long *) ptr) 188 : "d" (x), "a" (ptr), "m" (*(long *) ptr)
195 : "memory", "cc" ); 189 : "memory", "cc");
196 x = old; 190 x = old;
197 break; 191 break;
198#endif /* __s390x__ */ 192#endif /* __s390x__ */
@@ -224,55 +218,55 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
224 shift = (3 ^ (addr & 3)) << 3; 218 shift = (3 ^ (addr & 3)) << 3;
225 addr ^= addr & 3; 219 addr ^= addr & 3;
226 asm volatile( 220 asm volatile(
227 " l %0,0(%4)\n" 221 " l %0,0(%4)\n"
228 "0: nr %0,%5\n" 222 "0: nr %0,%5\n"
229 " lr %1,%0\n" 223 " lr %1,%0\n"
230 " or %0,%2\n" 224 " or %0,%2\n"
231 " or %1,%3\n" 225 " or %1,%3\n"
232 " cs %0,%1,0(%4)\n" 226 " cs %0,%1,0(%4)\n"
233 " jnl 1f\n" 227 " jnl 1f\n"
234 " xr %1,%0\n" 228 " xr %1,%0\n"
235 " nr %1,%5\n" 229 " nr %1,%5\n"
236 " jnz 0b\n" 230 " jnz 0b\n"
237 "1:" 231 "1:"
238 : "=&d" (prev), "=&d" (tmp) 232 : "=&d" (prev), "=&d" (tmp)
239 : "d" (old << shift), "d" (new << shift), "a" (ptr), 233 : "d" (old << shift), "d" (new << shift), "a" (ptr),
240 "d" (~(255 << shift)) 234 "d" (~(255 << shift))
241 : "memory", "cc" ); 235 : "memory", "cc");
242 return prev >> shift; 236 return prev >> shift;
243 case 2: 237 case 2:
244 addr = (unsigned long) ptr; 238 addr = (unsigned long) ptr;
245 shift = (2 ^ (addr & 2)) << 3; 239 shift = (2 ^ (addr & 2)) << 3;
246 addr ^= addr & 2; 240 addr ^= addr & 2;
247 asm volatile( 241 asm volatile(
248 " l %0,0(%4)\n" 242 " l %0,0(%4)\n"
249 "0: nr %0,%5\n" 243 "0: nr %0,%5\n"
250 " lr %1,%0\n" 244 " lr %1,%0\n"
251 " or %0,%2\n" 245 " or %0,%2\n"
252 " or %1,%3\n" 246 " or %1,%3\n"
253 " cs %0,%1,0(%4)\n" 247 " cs %0,%1,0(%4)\n"
254 " jnl 1f\n" 248 " jnl 1f\n"
255 " xr %1,%0\n" 249 " xr %1,%0\n"
256 " nr %1,%5\n" 250 " nr %1,%5\n"
257 " jnz 0b\n" 251 " jnz 0b\n"
258 "1:" 252 "1:"
259 : "=&d" (prev), "=&d" (tmp) 253 : "=&d" (prev), "=&d" (tmp)
260 : "d" (old << shift), "d" (new << shift), "a" (ptr), 254 : "d" (old << shift), "d" (new << shift), "a" (ptr),
261 "d" (~(65535 << shift)) 255 "d" (~(65535 << shift))
262 : "memory", "cc" ); 256 : "memory", "cc");
263 return prev >> shift; 257 return prev >> shift;
264 case 4: 258 case 4:
265 asm volatile ( 259 asm volatile(
266 " cs %0,%2,0(%3)\n" 260 " cs %0,%2,0(%3)\n"
267 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 261 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
268 : "memory", "cc" ); 262 : "memory", "cc");
269 return prev; 263 return prev;
270#ifdef __s390x__ 264#ifdef __s390x__
271 case 8: 265 case 8:
272 asm volatile ( 266 asm volatile(
273 " csg %0,%2,0(%3)\n" 267 " csg %0,%2,0(%3)\n"
274 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 268 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
275 : "memory", "cc" ); 269 : "memory", "cc");
276 return prev; 270 return prev;
277#endif /* __s390x__ */ 271#endif /* __s390x__ */
278 } 272 }
@@ -289,8 +283,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
289 * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ). 283 * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ).
290 */ 284 */
291 285
292#define eieio() __asm__ __volatile__ ( "bcr 15,0" : : : "memory" ) 286#define eieio() asm volatile("bcr 15,0" : : : "memory")
293# define SYNC_OTHER_CORES(x) eieio() 287#define SYNC_OTHER_CORES(x) eieio()
294#define mb() eieio() 288#define mb() eieio()
295#define rmb() eieio() 289#define rmb() eieio()
296#define wmb() eieio() 290#define wmb() eieio()
@@ -307,117 +301,56 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
307 301
308#ifdef __s390x__ 302#ifdef __s390x__
309 303
310#define __ctl_load(array, low, high) ({ \ 304#define __ctl_load(array, low, high) ({ \
311 typedef struct { char _[sizeof(array)]; } addrtype; \ 305 typedef struct { char _[sizeof(array)]; } addrtype; \
312 __asm__ __volatile__ ( \ 306 asm volatile( \
313 " bras 1,0f\n" \ 307 " lctlg %1,%2,0(%0)\n" \
314 " lctlg 0,0,0(%0)\n" \ 308 : : "a" (&array), "i" (low), "i" (high), \
315 "0: ex %1,0(1)" \ 309 "m" (*(addrtype *)(array))); \
316 : : "a" (&array), "a" (((low)<<4)+(high)), \
317 "m" (*(addrtype *)(array)) : "1" ); \
318 }) 310 })
319 311
320#define __ctl_store(array, low, high) ({ \ 312#define __ctl_store(array, low, high) ({ \
321 typedef struct { char _[sizeof(array)]; } addrtype; \ 313 typedef struct { char _[sizeof(array)]; } addrtype; \
322 __asm__ __volatile__ ( \ 314 asm volatile( \
323 " bras 1,0f\n" \ 315 " stctg %2,%3,0(%1)\n" \
324 " stctg 0,0,0(%1)\n" \ 316 : "=m" (*(addrtype *)(array)) \
325 "0: ex %2,0(1)" \ 317 : "a" (&array), "i" (low), "i" (high)); \
326 : "=m" (*(addrtype *)(array)) \
327 : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
328 }) 318 })
329 319
330#define __ctl_set_bit(cr, bit) ({ \
331 __u8 __dummy[24]; \
332 __asm__ __volatile__ ( \
333 " bras 1,0f\n" /* skip indirect insns */ \
334 " stctg 0,0,0(%1)\n" \
335 " lctlg 0,0,0(%1)\n" \
336 "0: ex %2,0(1)\n" /* execute stctl */ \
337 " lg 0,0(%1)\n" \
338 " ogr 0,%3\n" /* set the bit */ \
339 " stg 0,0(%1)\n" \
340 "1: ex %2,6(1)" /* execute lctl */ \
341 : "=m" (__dummy) \
342 : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
343 "a" (cr*17), "a" (1L<<(bit)) \
344 : "cc", "0", "1" ); \
345 })
346
347#define __ctl_clear_bit(cr, bit) ({ \
348 __u8 __dummy[16]; \
349 __asm__ __volatile__ ( \
350 " bras 1,0f\n" /* skip indirect insns */ \
351 " stctg 0,0,0(%1)\n" \
352 " lctlg 0,0,0(%1)\n" \
353 "0: ex %2,0(1)\n" /* execute stctl */ \
354 " lg 0,0(%1)\n" \
355 " ngr 0,%3\n" /* set the bit */ \
356 " stg 0,0(%1)\n" \
357 "1: ex %2,6(1)" /* execute lctl */ \
358 : "=m" (__dummy) \
359 : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
360 "a" (cr*17), "a" (~(1L<<(bit))) \
361 : "cc", "0", "1" ); \
362 })
363
364#else /* __s390x__ */ 320#else /* __s390x__ */
365 321
366#define __ctl_load(array, low, high) ({ \ 322#define __ctl_load(array, low, high) ({ \
367 typedef struct { char _[sizeof(array)]; } addrtype; \ 323 typedef struct { char _[sizeof(array)]; } addrtype; \
368 __asm__ __volatile__ ( \ 324 asm volatile( \
369 " bras 1,0f\n" \ 325 " lctl %1,%2,0(%0)\n" \
370 " lctl 0,0,0(%0)\n" \ 326 : : "a" (&array), "i" (low), "i" (high), \
371 "0: ex %1,0(1)" \ 327 "m" (*(addrtype *)(array))); \
372 : : "a" (&array), "a" (((low)<<4)+(high)), \ 328})
373 "m" (*(addrtype *)(array)) : "1" ); \
374 })
375 329
376#define __ctl_store(array, low, high) ({ \ 330#define __ctl_store(array, low, high) ({ \
377 typedef struct { char _[sizeof(array)]; } addrtype; \ 331 typedef struct { char _[sizeof(array)]; } addrtype; \
378 __asm__ __volatile__ ( \ 332 asm volatile( \
379 " bras 1,0f\n" \ 333 " stctl %2,%3,0(%1)\n" \
380 " stctl 0,0,0(%1)\n" \ 334 : "=m" (*(addrtype *)(array)) \
381 "0: ex %2,0(1)" \ 335 : "a" (&array), "i" (low), "i" (high)); \
382 : "=m" (*(addrtype *)(array)) \
383 : "a" (&array), "a" (((low)<<4)+(high)): "1" ); \
384 }) 336 })
385 337
386#define __ctl_set_bit(cr, bit) ({ \
387 __u8 __dummy[16]; \
388 __asm__ __volatile__ ( \
389 " bras 1,0f\n" /* skip indirect insns */ \
390 " stctl 0,0,0(%1)\n" \
391 " lctl 0,0,0(%1)\n" \
392 "0: ex %2,0(1)\n" /* execute stctl */ \
393 " l 0,0(%1)\n" \
394 " or 0,%3\n" /* set the bit */ \
395 " st 0,0(%1)\n" \
396 "1: ex %2,4(1)" /* execute lctl */ \
397 : "=m" (__dummy) \
398 : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
399 "a" (cr*17), "a" (1<<(bit)) \
400 : "cc", "0", "1" ); \
401 })
402
403#define __ctl_clear_bit(cr, bit) ({ \
404 __u8 __dummy[16]; \
405 __asm__ __volatile__ ( \
406 " bras 1,0f\n" /* skip indirect insns */ \
407 " stctl 0,0,0(%1)\n" \
408 " lctl 0,0,0(%1)\n" \
409 "0: ex %2,0(1)\n" /* execute stctl */ \
410 " l 0,0(%1)\n" \
411 " nr 0,%3\n" /* set the bit */ \
412 " st 0,0(%1)\n" \
413 "1: ex %2,4(1)" /* execute lctl */ \
414 : "=m" (__dummy) \
415 : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
416 "a" (cr*17), "a" (~(1<<(bit))) \
417 : "cc", "0", "1" ); \
418 })
419#endif /* __s390x__ */ 338#endif /* __s390x__ */
420 339
340#define __ctl_set_bit(cr, bit) ({ \
341 unsigned long __dummy; \
342 __ctl_store(__dummy, cr, cr); \
343 __dummy |= 1UL << (bit); \
344 __ctl_load(__dummy, cr, cr); \
345})
346
347#define __ctl_clear_bit(cr, bit) ({ \
348 unsigned long __dummy; \
349 __ctl_store(__dummy, cr, cr); \
350 __dummy &= ~(1UL << (bit)); \
351 __ctl_load(__dummy, cr, cr); \
352})
353
421#include <linux/irqflags.h> 354#include <linux/irqflags.h>
422 355
423/* 356/*
@@ -427,8 +360,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
427static inline void 360static inline void
428__set_psw_mask(unsigned long mask) 361__set_psw_mask(unsigned long mask)
429{ 362{
430 local_save_flags(mask); 363 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
431 __load_psw_mask(mask);
432} 364}
433 365
434#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) 366#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS)
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 5d0332a4c2bd..4df4a41029a3 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -15,20 +15,21 @@
15 15
16typedef unsigned long long cycles_t; 16typedef unsigned long long cycles_t;
17 17
18static inline cycles_t get_cycles(void)
19{
20 cycles_t cycles;
21
22 __asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");
23 return cycles >> 2;
24}
25
26static inline unsigned long long get_clock (void) 18static inline unsigned long long get_clock (void)
27{ 19{
28 unsigned long long clk; 20 unsigned long long clk;
29 21
30 __asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 22#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
23 asm volatile("stck %0" : "=Q" (clk) : : "cc");
24#else /* __GNUC__ */
25 asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
26#endif /* __GNUC__ */
31 return clk; 27 return clk;
32} 28}
33 29
30static inline cycles_t get_cycles(void)
31{
32 return (cycles_t) get_clock() >> 2;
33}
34
34#endif 35#endif
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 73cd85bebfb2..fa4dc916a9bf 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -25,7 +25,7 @@
25 */ 25 */
26 26
27#define local_flush_tlb() \ 27#define local_flush_tlb() \
28do { __asm__ __volatile__("ptlb": : :"memory"); } while (0) 28do { asm volatile("ptlb": : :"memory"); } while (0)
29 29
30#ifndef CONFIG_SMP 30#ifndef CONFIG_SMP
31 31
@@ -68,24 +68,24 @@ extern void smp_ptlb_all(void);
68 68
69static inline void global_flush_tlb(void) 69static inline void global_flush_tlb(void)
70{ 70{
71 register unsigned long reg2 asm("2");
72 register unsigned long reg3 asm("3");
73 register unsigned long reg4 asm("4");
74 long dummy;
75
71#ifndef __s390x__ 76#ifndef __s390x__
72 if (!MACHINE_HAS_CSP) { 77 if (!MACHINE_HAS_CSP) {
73 smp_ptlb_all(); 78 smp_ptlb_all();
74 return; 79 return;
75 } 80 }
76#endif /* __s390x__ */ 81#endif /* __s390x__ */
77 { 82
78 register unsigned long addr asm("4"); 83 dummy = 0;
79 long dummy; 84 reg2 = reg3 = 0;
80 85 reg4 = ((unsigned long) &dummy) + 1;
81 dummy = 0; 86 asm volatile(
82 addr = ((unsigned long) &dummy) + 1; 87 " csp %0,%2"
83 __asm__ __volatile__ ( 88 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
84 " slr 2,2\n"
85 " slr 3,3\n"
86 " csp 2,%0"
87 : : "a" (addr), "m" (dummy) : "cc", "2", "3" );
88 }
89} 89}
90 90
91/* 91/*
@@ -102,9 +102,9 @@ static inline void __flush_tlb_mm(struct mm_struct * mm)
102 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 102 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 return; 103 return;
104 if (MACHINE_HAS_IDTE) { 104 if (MACHINE_HAS_IDTE) {
105 asm volatile (".insn rrf,0xb98e0000,0,%0,%1,0" 105 asm volatile(
106 : : "a" (2048), 106 " .insn rrf,0xb98e0000,0,%0,%1,0"
107 "a" (__pa(mm->pgd)&PAGE_MASK) : "cc" ); 107 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
108 return; 108 return;
109 } 109 }
110 preempt_disable(); 110 preempt_disable();
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 0b7c0ca4c3d7..72ae4efddb49 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -38,25 +38,14 @@
38#define get_ds() (KERNEL_DS) 38#define get_ds() (KERNEL_DS)
39#define get_fs() (current->thread.mm_segment) 39#define get_fs() (current->thread.mm_segment)
40 40
41#ifdef __s390x__
42#define set_fs(x) \ 41#define set_fs(x) \
43({ \ 42({ \
44 unsigned long __pto; \ 43 unsigned long __pto; \
45 current->thread.mm_segment = (x); \ 44 current->thread.mm_segment = (x); \
46 __pto = current->thread.mm_segment.ar4 ? \ 45 __pto = current->thread.mm_segment.ar4 ? \
47 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ 46 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
48 asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \ 47 __ctl_load(__pto, 7, 7); \
49}) 48})
50#else
51#define set_fs(x) \
52({ \
53 unsigned long __pto; \
54 current->thread.mm_segment = (x); \
55 __pto = current->thread.mm_segment.ar4 ? \
56 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
57 asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \
58})
59#endif
60 49
61#define segment_eq(a,b) ((a).ar4 == (b).ar4) 50#define segment_eq(a,b) ((a).ar4 == (b).ar4)
62 51
@@ -85,76 +74,51 @@ struct exception_table_entry
85 unsigned long insn, fixup; 74 unsigned long insn, fixup;
86}; 75};
87 76
88#ifndef __s390x__ 77struct uaccess_ops {
89#define __uaccess_fixup \ 78 size_t (*copy_from_user)(size_t, const void __user *, void *);
90 ".section .fixup,\"ax\"\n" \ 79 size_t (*copy_from_user_small)(size_t, const void __user *, void *);
91 "2: lhi %0,%4\n" \ 80 size_t (*copy_to_user)(size_t, void __user *, const void *);
92 " bras 1,3f\n" \ 81 size_t (*copy_to_user_small)(size_t, void __user *, const void *);
93 " .long 1b\n" \ 82 size_t (*copy_in_user)(size_t, void __user *, const void __user *);
94 "3: l 1,0(1)\n" \ 83 size_t (*clear_user)(size_t, void __user *);
95 " br 1\n" \ 84 size_t (*strnlen_user)(size_t, const char __user *);
96 ".previous\n" \ 85 size_t (*strncpy_from_user)(size_t, const char __user *, char *);
97 ".section __ex_table,\"a\"\n" \ 86 int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
98 " .align 4\n" \ 87 int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
99 " .long 0b,2b\n" \ 88};
100 ".previous" 89
101#define __uaccess_clobber "cc", "1" 90extern struct uaccess_ops uaccess;
102#else /* __s390x__ */ 91extern struct uaccess_ops uaccess_std;
103#define __uaccess_fixup \ 92extern struct uaccess_ops uaccess_mvcos;
104 ".section .fixup,\"ax\"\n" \ 93
105 "2: lghi %0,%4\n" \ 94static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
106 " jg 1b\n" \ 95{
107 ".previous\n" \ 96 size = uaccess.copy_to_user_small(size, ptr, x);
108 ".section __ex_table,\"a\"\n" \ 97 return size ? -EFAULT : size;
109 " .align 8\n" \ 98}
110 " .quad 0b,2b\n" \ 99
111 ".previous" 100static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
112#define __uaccess_clobber "cc" 101{
113#endif /* __s390x__ */ 102 size = uaccess.copy_from_user_small(size, ptr, x);
103 return size ? -EFAULT : size;
104}
114 105
115/* 106/*
116 * These are the main single-value transfer routines. They automatically 107 * These are the main single-value transfer routines. They automatically
117 * use the right size if we just have the right pointer type. 108 * use the right size if we just have the right pointer type.
118 */ 109 */
119#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
120#define __put_user_asm(x, ptr, err) \
121({ \
122 err = 0; \
123 asm volatile( \
124 "0: mvcs 0(%1,%2),%3,%0\n" \
125 "1:\n" \
126 __uaccess_fixup \
127 : "+&d" (err) \
128 : "d" (sizeof(*(ptr))), "a" (ptr), "Q" (x), \
129 "K" (-EFAULT) \
130 : __uaccess_clobber ); \
131})
132#else
133#define __put_user_asm(x, ptr, err) \
134({ \
135 err = 0; \
136 asm volatile( \
137 "0: mvcs 0(%1,%2),0(%3),%0\n" \
138 "1:\n" \
139 __uaccess_fixup \
140 : "+&d" (err) \
141 : "d" (sizeof(*(ptr))), "a" (ptr), "a" (&(x)), \
142 "K" (-EFAULT), "m" (x) \
143 : __uaccess_clobber ); \
144})
145#endif
146
147#define __put_user(x, ptr) \ 110#define __put_user(x, ptr) \
148({ \ 111({ \
149 __typeof__(*(ptr)) __x = (x); \ 112 __typeof__(*(ptr)) __x = (x); \
150 int __pu_err; \ 113 int __pu_err = -EFAULT; \
151 __chk_user_ptr(ptr); \ 114 __chk_user_ptr(ptr); \
152 switch (sizeof (*(ptr))) { \ 115 switch (sizeof (*(ptr))) { \
153 case 1: \ 116 case 1: \
154 case 2: \ 117 case 2: \
155 case 4: \ 118 case 4: \
156 case 8: \ 119 case 8: \
157 __put_user_asm(__x, ptr, __pu_err); \ 120 __pu_err = __put_user_fn(sizeof (*(ptr)), \
121 ptr, &__x); \
158 break; \ 122 break; \
159 default: \ 123 default: \
160 __put_user_bad(); \ 124 __put_user_bad(); \
@@ -172,60 +136,36 @@ struct exception_table_entry
172 136
173extern int __put_user_bad(void) __attribute__((noreturn)); 137extern int __put_user_bad(void) __attribute__((noreturn));
174 138
175#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
176#define __get_user_asm(x, ptr, err) \
177({ \
178 err = 0; \
179 asm volatile ( \
180 "0: mvcp %O1(%2,%R1),0(%3),%0\n" \
181 "1:\n" \
182 __uaccess_fixup \
183 : "+&d" (err), "=Q" (x) \
184 : "d" (sizeof(*(ptr))), "a" (ptr), \
185 "K" (-EFAULT) \
186 : __uaccess_clobber ); \
187})
188#else
189#define __get_user_asm(x, ptr, err) \
190({ \
191 err = 0; \
192 asm volatile ( \
193 "0: mvcp 0(%2,%5),0(%3),%0\n" \
194 "1:\n" \
195 __uaccess_fixup \
196 : "+&d" (err), "=m" (x) \
197 : "d" (sizeof(*(ptr))), "a" (ptr), \
198 "K" (-EFAULT), "a" (&(x)) \
199 : __uaccess_clobber ); \
200})
201#endif
202
203#define __get_user(x, ptr) \ 139#define __get_user(x, ptr) \
204({ \ 140({ \
205 int __gu_err; \ 141 int __gu_err = -EFAULT; \
206 __chk_user_ptr(ptr); \ 142 __chk_user_ptr(ptr); \
207 switch (sizeof(*(ptr))) { \ 143 switch (sizeof(*(ptr))) { \
208 case 1: { \ 144 case 1: { \
209 unsigned char __x; \ 145 unsigned char __x; \
210 __get_user_asm(__x, ptr, __gu_err); \ 146 __gu_err = __get_user_fn(sizeof (*(ptr)), \
147 ptr, &__x); \
211 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 148 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
212 break; \ 149 break; \
213 }; \ 150 }; \
214 case 2: { \ 151 case 2: { \
215 unsigned short __x; \ 152 unsigned short __x; \
216 __get_user_asm(__x, ptr, __gu_err); \ 153 __gu_err = __get_user_fn(sizeof (*(ptr)), \
154 ptr, &__x); \
217 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 155 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
218 break; \ 156 break; \
219 }; \ 157 }; \
220 case 4: { \ 158 case 4: { \
221 unsigned int __x; \ 159 unsigned int __x; \
222 __get_user_asm(__x, ptr, __gu_err); \ 160 __gu_err = __get_user_fn(sizeof (*(ptr)), \
161 ptr, &__x); \
223 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 162 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
224 break; \ 163 break; \
225 }; \ 164 }; \
226 case 8: { \ 165 case 8: { \
227 unsigned long long __x; \ 166 unsigned long long __x; \
228 __get_user_asm(__x, ptr, __gu_err); \ 167 __gu_err = __get_user_fn(sizeof (*(ptr)), \
168 ptr, &__x); \
229 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 169 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
230 break; \ 170 break; \
231 }; \ 171 }; \
@@ -247,8 +187,6 @@ extern int __get_user_bad(void) __attribute__((noreturn));
247#define __put_user_unaligned __put_user 187#define __put_user_unaligned __put_user
248#define __get_user_unaligned __get_user 188#define __get_user_unaligned __get_user
249 189
250extern long __copy_to_user_asm(const void *from, long n, void __user *to);
251
252/** 190/**
253 * __copy_to_user: - Copy a block of data into user space, with less checking. 191 * __copy_to_user: - Copy a block of data into user space, with less checking.
254 * @to: Destination address, in user space. 192 * @to: Destination address, in user space.
@@ -266,7 +204,10 @@ extern long __copy_to_user_asm(const void *from, long n, void __user *to);
266static inline unsigned long 204static inline unsigned long
267__copy_to_user(void __user *to, const void *from, unsigned long n) 205__copy_to_user(void __user *to, const void *from, unsigned long n)
268{ 206{
269 return __copy_to_user_asm(from, n, to); 207 if (__builtin_constant_p(n) && (n <= 256))
208 return uaccess.copy_to_user_small(n, to, from);
209 else
210 return uaccess.copy_to_user(n, to, from);
270} 211}
271 212
272#define __copy_to_user_inatomic __copy_to_user 213#define __copy_to_user_inatomic __copy_to_user
@@ -294,8 +235,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
294 return n; 235 return n;
295} 236}
296 237
297extern long __copy_from_user_asm(void *to, long n, const void __user *from);
298
299/** 238/**
300 * __copy_from_user: - Copy a block of data from user space, with less checking. 239 * __copy_from_user: - Copy a block of data from user space, with less checking.
301 * @to: Destination address, in kernel space. 240 * @to: Destination address, in kernel space.
@@ -316,7 +255,10 @@ extern long __copy_from_user_asm(void *to, long n, const void __user *from);
316static inline unsigned long 255static inline unsigned long
317__copy_from_user(void *to, const void __user *from, unsigned long n) 256__copy_from_user(void *to, const void __user *from, unsigned long n)
318{ 257{
319 return __copy_from_user_asm(to, n, from); 258 if (__builtin_constant_p(n) && (n <= 256))
259 return uaccess.copy_from_user_small(n, from, to);
260 else
261 return uaccess.copy_from_user(n, from, to);
320} 262}
321 263
322/** 264/**
@@ -346,13 +288,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
346 return n; 288 return n;
347} 289}
348 290
349extern unsigned long __copy_in_user_asm(const void __user *from, long n,
350 void __user *to);
351
352static inline unsigned long 291static inline unsigned long
353__copy_in_user(void __user *to, const void __user *from, unsigned long n) 292__copy_in_user(void __user *to, const void __user *from, unsigned long n)
354{ 293{
355 return __copy_in_user_asm(from, n, to); 294 return uaccess.copy_in_user(n, to, from);
356} 295}
357 296
358static inline unsigned long 297static inline unsigned long
@@ -360,34 +299,28 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
360{ 299{
361 might_sleep(); 300 might_sleep();
362 if (__access_ok(from,n) && __access_ok(to,n)) 301 if (__access_ok(from,n) && __access_ok(to,n))
363 n = __copy_in_user_asm(from, n, to); 302 n = __copy_in_user(to, from, n);
364 return n; 303 return n;
365} 304}
366 305
367/* 306/*
368 * Copy a null terminated string from userspace. 307 * Copy a null terminated string from userspace.
369 */ 308 */
370extern long __strncpy_from_user_asm(long count, char *dst,
371 const char __user *src);
372
373static inline long 309static inline long
374strncpy_from_user(char *dst, const char __user *src, long count) 310strncpy_from_user(char *dst, const char __user *src, long count)
375{ 311{
376 long res = -EFAULT; 312 long res = -EFAULT;
377 might_sleep(); 313 might_sleep();
378 if (access_ok(VERIFY_READ, src, 1)) 314 if (access_ok(VERIFY_READ, src, 1))
379 res = __strncpy_from_user_asm(count, dst, src); 315 res = uaccess.strncpy_from_user(count, src, dst);
380 return res; 316 return res;
381} 317}
382 318
383
384extern long __strnlen_user_asm(long count, const char __user *src);
385
386static inline unsigned long 319static inline unsigned long
387strnlen_user(const char __user * src, unsigned long n) 320strnlen_user(const char __user * src, unsigned long n)
388{ 321{
389 might_sleep(); 322 might_sleep();
390 return __strnlen_user_asm(n, src); 323 return uaccess.strnlen_user(n, src);
391} 324}
392 325
393/** 326/**
@@ -410,12 +343,10 @@ strnlen_user(const char __user * src, unsigned long n)
410 * Zero Userspace 343 * Zero Userspace
411 */ 344 */
412 345
413extern long __clear_user_asm(void __user *to, long n);
414
415static inline unsigned long 346static inline unsigned long
416__clear_user(void __user *to, unsigned long n) 347__clear_user(void __user *to, unsigned long n)
417{ 348{
418 return __clear_user_asm(to, n); 349 return uaccess.clear_user(n, to);
419} 350}
420 351
421static inline unsigned long 352static inline unsigned long
@@ -423,7 +354,7 @@ clear_user(void __user *to, unsigned long n)
423{ 354{
424 might_sleep(); 355 might_sleep();
425 if (access_ok(VERIFY_WRITE, to, n)) 356 if (access_ok(VERIFY_WRITE, to, n))
426 n = __clear_user_asm(to, n); 357 n = uaccess.clear_user(n, to);
427 return n; 358 return n;
428} 359}
429 360
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index aa7a243862e1..0361ac5dcde3 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -25,17 +25,12 @@
25#define __NR_unlink 10 25#define __NR_unlink 10
26#define __NR_execve 11 26#define __NR_execve 11
27#define __NR_chdir 12 27#define __NR_chdir 12
28#define __NR_time 13
29#define __NR_mknod 14 28#define __NR_mknod 14
30#define __NR_chmod 15 29#define __NR_chmod 15
31#define __NR_lchown 16
32#define __NR_lseek 19 30#define __NR_lseek 19
33#define __NR_getpid 20 31#define __NR_getpid 20
34#define __NR_mount 21 32#define __NR_mount 21
35#define __NR_umount 22 33#define __NR_umount 22
36#define __NR_setuid 23
37#define __NR_getuid 24
38#define __NR_stime 25
39#define __NR_ptrace 26 34#define __NR_ptrace 26
40#define __NR_alarm 27 35#define __NR_alarm 27
41#define __NR_pause 29 36#define __NR_pause 29
@@ -51,11 +46,7 @@
51#define __NR_pipe 42 46#define __NR_pipe 42
52#define __NR_times 43 47#define __NR_times 43
53#define __NR_brk 45 48#define __NR_brk 45
54#define __NR_setgid 46
55#define __NR_getgid 47
56#define __NR_signal 48 49#define __NR_signal 48
57#define __NR_geteuid 49
58#define __NR_getegid 50
59#define __NR_acct 51 50#define __NR_acct 51
60#define __NR_umount2 52 51#define __NR_umount2 52
61#define __NR_ioctl 54 52#define __NR_ioctl 54
@@ -69,18 +60,13 @@
69#define __NR_getpgrp 65 60#define __NR_getpgrp 65
70#define __NR_setsid 66 61#define __NR_setsid 66
71#define __NR_sigaction 67 62#define __NR_sigaction 67
72#define __NR_setreuid 70
73#define __NR_setregid 71
74#define __NR_sigsuspend 72 63#define __NR_sigsuspend 72
75#define __NR_sigpending 73 64#define __NR_sigpending 73
76#define __NR_sethostname 74 65#define __NR_sethostname 74
77#define __NR_setrlimit 75 66#define __NR_setrlimit 75
78#define __NR_getrlimit 76
79#define __NR_getrusage 77 67#define __NR_getrusage 77
80#define __NR_gettimeofday 78 68#define __NR_gettimeofday 78
81#define __NR_settimeofday 79 69#define __NR_settimeofday 79
82#define __NR_getgroups 80
83#define __NR_setgroups 81
84#define __NR_symlink 83 70#define __NR_symlink 83
85#define __NR_readlink 85 71#define __NR_readlink 85
86#define __NR_uselib 86 72#define __NR_uselib 86
@@ -92,12 +78,10 @@
92#define __NR_truncate 92 78#define __NR_truncate 92
93#define __NR_ftruncate 93 79#define __NR_ftruncate 93
94#define __NR_fchmod 94 80#define __NR_fchmod 94
95#define __NR_fchown 95
96#define __NR_getpriority 96 81#define __NR_getpriority 96
97#define __NR_setpriority 97 82#define __NR_setpriority 97
98#define __NR_statfs 99 83#define __NR_statfs 99
99#define __NR_fstatfs 100 84#define __NR_fstatfs 100
100#define __NR_ioperm 101
101#define __NR_socketcall 102 85#define __NR_socketcall 102
102#define __NR_syslog 103 86#define __NR_syslog 103
103#define __NR_setitimer 104 87#define __NR_setitimer 104
@@ -131,11 +115,7 @@
131#define __NR_sysfs 135 115#define __NR_sysfs 135
132#define __NR_personality 136 116#define __NR_personality 136
133#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ 117#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
134#define __NR_setfsuid 138
135#define __NR_setfsgid 139
136#define __NR__llseek 140
137#define __NR_getdents 141 118#define __NR_getdents 141
138#define __NR__newselect 142
139#define __NR_flock 143 119#define __NR_flock 143
140#define __NR_msync 144 120#define __NR_msync 144
141#define __NR_readv 145 121#define __NR_readv 145
@@ -157,13 +137,9 @@
157#define __NR_sched_rr_get_interval 161 137#define __NR_sched_rr_get_interval 161
158#define __NR_nanosleep 162 138#define __NR_nanosleep 162
159#define __NR_mremap 163 139#define __NR_mremap 163
160#define __NR_setresuid 164
161#define __NR_getresuid 165
162#define __NR_query_module 167 140#define __NR_query_module 167
163#define __NR_poll 168 141#define __NR_poll 168
164#define __NR_nfsservctl 169 142#define __NR_nfsservctl 169
165#define __NR_setresgid 170
166#define __NR_getresgid 171
167#define __NR_prctl 172 143#define __NR_prctl 172
168#define __NR_rt_sigreturn 173 144#define __NR_rt_sigreturn 173
169#define __NR_rt_sigaction 174 145#define __NR_rt_sigaction 174
@@ -174,7 +150,6 @@
174#define __NR_rt_sigsuspend 179 150#define __NR_rt_sigsuspend 179
175#define __NR_pread64 180 151#define __NR_pread64 180
176#define __NR_pwrite64 181 152#define __NR_pwrite64 181
177#define __NR_chown 182
178#define __NR_getcwd 183 153#define __NR_getcwd 183
179#define __NR_capget 184 154#define __NR_capget 184
180#define __NR_capset 185 155#define __NR_capset 185
@@ -183,39 +158,11 @@
183#define __NR_getpmsg 188 158#define __NR_getpmsg 188
184#define __NR_putpmsg 189 159#define __NR_putpmsg 189
185#define __NR_vfork 190 160#define __NR_vfork 190
186#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
187#define __NR_mmap2 192
188#define __NR_truncate64 193
189#define __NR_ftruncate64 194
190#define __NR_stat64 195
191#define __NR_lstat64 196
192#define __NR_fstat64 197
193#define __NR_lchown32 198
194#define __NR_getuid32 199
195#define __NR_getgid32 200
196#define __NR_geteuid32 201
197#define __NR_getegid32 202
198#define __NR_setreuid32 203
199#define __NR_setregid32 204
200#define __NR_getgroups32 205
201#define __NR_setgroups32 206
202#define __NR_fchown32 207
203#define __NR_setresuid32 208
204#define __NR_getresuid32 209
205#define __NR_setresgid32 210
206#define __NR_getresgid32 211
207#define __NR_chown32 212
208#define __NR_setuid32 213
209#define __NR_setgid32 214
210#define __NR_setfsuid32 215
211#define __NR_setfsgid32 216
212#define __NR_pivot_root 217 161#define __NR_pivot_root 217
213#define __NR_mincore 218 162#define __NR_mincore 218
214#define __NR_madvise 219 163#define __NR_madvise 219
215#define __NR_getdents64 220 164#define __NR_getdents64 220
216#define __NR_fcntl64 221
217#define __NR_readahead 222 165#define __NR_readahead 222
218#define __NR_sendfile64 223
219#define __NR_setxattr 224 166#define __NR_setxattr 224
220#define __NR_lsetxattr 225 167#define __NR_lsetxattr 225
221#define __NR_fsetxattr 226 168#define __NR_fsetxattr 226
@@ -256,7 +203,6 @@
256#define __NR_clock_getres (__NR_timer_create+7) 203#define __NR_clock_getres (__NR_timer_create+7)
257#define __NR_clock_nanosleep (__NR_timer_create+8) 204#define __NR_clock_nanosleep (__NR_timer_create+8)
258/* Number 263 is reserved for vserver */ 205/* Number 263 is reserved for vserver */
259#define __NR_fadvise64_64 264
260#define __NR_statfs64 265 206#define __NR_statfs64 265
261#define __NR_fstatfs64 266 207#define __NR_fstatfs64 266
262#define __NR_remap_file_pages 267 208#define __NR_remap_file_pages 267
@@ -285,7 +231,6 @@
285#define __NR_mknodat 290 231#define __NR_mknodat 290
286#define __NR_fchownat 291 232#define __NR_fchownat 291
287#define __NR_futimesat 292 233#define __NR_futimesat 292
288#define __NR_fstatat64 293
289#define __NR_unlinkat 294 234#define __NR_unlinkat 294
290#define __NR_renameat 295 235#define __NR_renameat 295
291#define __NR_linkat 296 236#define __NR_linkat 296
@@ -310,62 +255,65 @@
310 * have a different name although they do the same (e.g. __NR_chown32 255 * have a different name although they do the same (e.g. __NR_chown32
311 * is __NR_chown on 64 bit). 256 * is __NR_chown on 64 bit).
312 */ 257 */
313#ifdef __s390x__ 258#ifndef __s390x__
314#undef __NR_time 259
315#undef __NR_lchown 260#define __NR_time 13
316#undef __NR_setuid 261#define __NR_lchown 16
317#undef __NR_getuid 262#define __NR_setuid 23
318#undef __NR_stime 263#define __NR_getuid 24
319#undef __NR_setgid 264#define __NR_stime 25
320#undef __NR_getgid 265#define __NR_setgid 46
321#undef __NR_geteuid 266#define __NR_getgid 47
322#undef __NR_getegid 267#define __NR_geteuid 49
323#undef __NR_setreuid 268#define __NR_getegid 50
324#undef __NR_setregid 269#define __NR_setreuid 70
325#undef __NR_getrlimit 270#define __NR_setregid 71
326#undef __NR_getgroups 271#define __NR_getrlimit 76
327#undef __NR_setgroups 272#define __NR_getgroups 80
328#undef __NR_fchown 273#define __NR_setgroups 81
329#undef __NR_ioperm 274#define __NR_fchown 95
330#undef __NR_setfsuid 275#define __NR_ioperm 101
331#undef __NR_setfsgid 276#define __NR_setfsuid 138
332#undef __NR__llseek 277#define __NR_setfsgid 139
333#undef __NR__newselect 278#define __NR__llseek 140
334#undef __NR_setresuid 279#define __NR__newselect 142
335#undef __NR_getresuid 280#define __NR_setresuid 164
336#undef __NR_setresgid 281#define __NR_getresuid 165
337#undef __NR_getresgid 282#define __NR_setresgid 170
338#undef __NR_chown 283#define __NR_getresgid 171
339#undef __NR_ugetrlimit 284#define __NR_chown 182
340#undef __NR_mmap2 285#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
341#undef __NR_truncate64 286#define __NR_mmap2 192
342#undef __NR_ftruncate64 287#define __NR_truncate64 193
343#undef __NR_stat64 288#define __NR_ftruncate64 194
344#undef __NR_lstat64 289#define __NR_stat64 195
345#undef __NR_fstat64 290#define __NR_lstat64 196
346#undef __NR_lchown32 291#define __NR_fstat64 197
347#undef __NR_getuid32 292#define __NR_lchown32 198
348#undef __NR_getgid32 293#define __NR_getuid32 199
349#undef __NR_geteuid32 294#define __NR_getgid32 200
350#undef __NR_getegid32 295#define __NR_geteuid32 201
351#undef __NR_setreuid32 296#define __NR_getegid32 202
352#undef __NR_setregid32 297#define __NR_setreuid32 203
353#undef __NR_getgroups32 298#define __NR_setregid32 204
354#undef __NR_setgroups32 299#define __NR_getgroups32 205
355#undef __NR_fchown32 300#define __NR_setgroups32 206
356#undef __NR_setresuid32 301#define __NR_fchown32 207
357#undef __NR_getresuid32 302#define __NR_setresuid32 208
358#undef __NR_setresgid32 303#define __NR_getresuid32 209
359#undef __NR_getresgid32 304#define __NR_setresgid32 210
360#undef __NR_chown32 305#define __NR_getresgid32 211
361#undef __NR_setuid32 306#define __NR_chown32 212
362#undef __NR_setgid32 307#define __NR_setuid32 213
363#undef __NR_setfsuid32 308#define __NR_setgid32 214
364#undef __NR_setfsgid32 309#define __NR_setfsuid32 215
365#undef __NR_fcntl64 310#define __NR_setfsgid32 216
366#undef __NR_sendfile64 311#define __NR_fcntl64 221
367#undef __NR_fadvise64_64 312#define __NR_sendfile64 223
368#undef __NR_fstatat64 313#define __NR_fadvise64_64 264
314#define __NR_fstatat64 293
315
316#else
369 317
370#define __NR_select 142 318#define __NR_select 142
371#define __NR_getrlimit 191 /* SuS compliant getrlimit */ 319#define __NR_getrlimit 191 /* SuS compliant getrlimit */
@@ -394,9 +342,11 @@
394 342
395#ifdef __KERNEL__ 343#ifdef __KERNEL__
396 344
345#include <linux/err.h>
346
397#define __syscall_return(type, res) \ 347#define __syscall_return(type, res) \
398do { \ 348do { \
399 if ((unsigned long)(res) >= (unsigned long)(-4095)) {\ 349 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
400 errno = -(res); \ 350 errno = -(res); \
401 res = -1; \ 351 res = -1; \
402 } \ 352 } \
@@ -405,145 +355,145 @@ do { \
405 355
406#define _svc_clobber "1", "cc", "memory" 356#define _svc_clobber "1", "cc", "memory"
407 357
408#define _syscall0(type,name) \ 358#define _syscall0(type,name) \
409type name(void) { \ 359type name(void) { \
410 register long __svcres asm("2"); \ 360 register long __svcres asm("2"); \
411 long __res; \ 361 long __res; \
412 __asm__ __volatile__ ( \ 362 asm volatile( \
413 " .if %1 < 256\n" \ 363 " .if %1 < 256\n" \
414 " svc %b1\n" \ 364 " svc %b1\n" \
415 " .else\n" \ 365 " .else\n" \
416 " la %%r1,%1\n" \ 366 " la %%r1,%1\n" \
417 " svc 0\n" \ 367 " svc 0\n" \
418 " .endif" \ 368 " .endif" \
419 : "=d" (__svcres) \ 369 : "=d" (__svcres) \
420 : "i" (__NR_##name) \ 370 : "i" (__NR_##name) \
421 : _svc_clobber ); \ 371 : _svc_clobber); \
422 __res = __svcres; \ 372 __res = __svcres; \
423 __syscall_return(type,__res); \ 373 __syscall_return(type,__res); \
424} 374}
425 375
426#define _syscall1(type,name,type1,arg1) \ 376#define _syscall1(type,name,type1,arg1) \
427type name(type1 arg1) { \ 377type name(type1 arg1) { \
428 register type1 __arg1 asm("2") = arg1; \ 378 register type1 __arg1 asm("2") = arg1; \
429 register long __svcres asm("2"); \ 379 register long __svcres asm("2"); \
430 long __res; \ 380 long __res; \
431 __asm__ __volatile__ ( \ 381 asm volatile( \
432 " .if %1 < 256\n" \ 382 " .if %1 < 256\n" \
433 " svc %b1\n" \ 383 " svc %b1\n" \
434 " .else\n" \ 384 " .else\n" \
435 " la %%r1,%1\n" \ 385 " la %%r1,%1\n" \
436 " svc 0\n" \ 386 " svc 0\n" \
437 " .endif" \ 387 " .endif" \
438 : "=d" (__svcres) \ 388 : "=d" (__svcres) \
439 : "i" (__NR_##name), \ 389 : "i" (__NR_##name), \
440 "0" (__arg1) \ 390 "0" (__arg1) \
441 : _svc_clobber ); \ 391 : _svc_clobber); \
442 __res = __svcres; \ 392 __res = __svcres; \
443 __syscall_return(type,__res); \ 393 __syscall_return(type,__res); \
444} 394}
445 395
446#define _syscall2(type,name,type1,arg1,type2,arg2) \ 396#define _syscall2(type,name,type1,arg1,type2,arg2) \
447type name(type1 arg1, type2 arg2) { \ 397type name(type1 arg1, type2 arg2) { \
448 register type1 __arg1 asm("2") = arg1; \ 398 register type1 __arg1 asm("2") = arg1; \
449 register type2 __arg2 asm("3") = arg2; \ 399 register type2 __arg2 asm("3") = arg2; \
450 register long __svcres asm("2"); \ 400 register long __svcres asm("2"); \
451 long __res; \ 401 long __res; \
452 __asm__ __volatile__ ( \ 402 asm volatile( \
453 " .if %1 < 256\n" \ 403 " .if %1 < 256\n" \
454 " svc %b1\n" \ 404 " svc %b1\n" \
455 " .else\n" \ 405 " .else\n" \
456 " la %%r1,%1\n" \ 406 " la %%r1,%1\n" \
457 " svc 0\n" \ 407 " svc 0\n" \
458 " .endif" \ 408 " .endif" \
459 : "=d" (__svcres) \ 409 : "=d" (__svcres) \
460 : "i" (__NR_##name), \ 410 : "i" (__NR_##name), \
461 "0" (__arg1), \ 411 "0" (__arg1), \
462 "d" (__arg2) \ 412 "d" (__arg2) \
463 : _svc_clobber ); \ 413 : _svc_clobber ); \
464 __res = __svcres; \ 414 __res = __svcres; \
465 __syscall_return(type,__res); \ 415 __syscall_return(type,__res); \
466} 416}
467 417
468#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)\ 418#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
469type name(type1 arg1, type2 arg2, type3 arg3) { \ 419type name(type1 arg1, type2 arg2, type3 arg3) { \
470 register type1 __arg1 asm("2") = arg1; \ 420 register type1 __arg1 asm("2") = arg1; \
471 register type2 __arg2 asm("3") = arg2; \ 421 register type2 __arg2 asm("3") = arg2; \
472 register type3 __arg3 asm("4") = arg3; \ 422 register type3 __arg3 asm("4") = arg3; \
473 register long __svcres asm("2"); \ 423 register long __svcres asm("2"); \
474 long __res; \ 424 long __res; \
475 __asm__ __volatile__ ( \ 425 asm volatile( \
476 " .if %1 < 256\n" \ 426 " .if %1 < 256\n" \
477 " svc %b1\n" \ 427 " svc %b1\n" \
478 " .else\n" \ 428 " .else\n" \
479 " la %%r1,%1\n" \ 429 " la %%r1,%1\n" \
480 " svc 0\n" \ 430 " svc 0\n" \
481 " .endif" \ 431 " .endif" \
482 : "=d" (__svcres) \ 432 : "=d" (__svcres) \
483 : "i" (__NR_##name), \ 433 : "i" (__NR_##name), \
484 "0" (__arg1), \ 434 "0" (__arg1), \
485 "d" (__arg2), \ 435 "d" (__arg2), \
486 "d" (__arg3) \ 436 "d" (__arg3) \
487 : _svc_clobber ); \ 437 : _svc_clobber); \
488 __res = __svcres; \ 438 __res = __svcres; \
489 __syscall_return(type,__res); \ 439 __syscall_return(type,__res); \
490} 440}
491 441
492#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,\ 442#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3, \
493 type4,name4) \ 443 type4,name4) \
494type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 444type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
495 register type1 __arg1 asm("2") = arg1; \ 445 register type1 __arg1 asm("2") = arg1; \
496 register type2 __arg2 asm("3") = arg2; \ 446 register type2 __arg2 asm("3") = arg2; \
497 register type3 __arg3 asm("4") = arg3; \ 447 register type3 __arg3 asm("4") = arg3; \
498 register type4 __arg4 asm("5") = arg4; \ 448 register type4 __arg4 asm("5") = arg4; \
499 register long __svcres asm("2"); \ 449 register long __svcres asm("2"); \
500 long __res; \ 450 long __res; \
501 __asm__ __volatile__ ( \ 451 asm volatile( \
502 " .if %1 < 256\n" \ 452 " .if %1 < 256\n" \
503 " svc %b1\n" \ 453 " svc %b1\n" \
504 " .else\n" \ 454 " .else\n" \
505 " la %%r1,%1\n" \ 455 " la %%r1,%1\n" \
506 " svc 0\n" \ 456 " svc 0\n" \
507 " .endif" \ 457 " .endif" \
508 : "=d" (__svcres) \ 458 : "=d" (__svcres) \
509 : "i" (__NR_##name), \ 459 : "i" (__NR_##name), \
510 "0" (__arg1), \ 460 "0" (__arg1), \
511 "d" (__arg2), \ 461 "d" (__arg2), \
512 "d" (__arg3), \ 462 "d" (__arg3), \
513 "d" (__arg4) \ 463 "d" (__arg4) \
514 : _svc_clobber ); \ 464 : _svc_clobber); \
515 __res = __svcres; \ 465 __res = __svcres; \
516 __syscall_return(type,__res); \ 466 __syscall_return(type,__res); \
517} 467}
518 468
519#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,\ 469#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3, \
520 type4,name4,type5,name5) \ 470 type4,name4,type5,name5) \
521type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 471type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
522 type5 arg5) { \ 472 type5 arg5) { \
523 register type1 __arg1 asm("2") = arg1; \ 473 register type1 __arg1 asm("2") = arg1; \
524 register type2 __arg2 asm("3") = arg2; \ 474 register type2 __arg2 asm("3") = arg2; \
525 register type3 __arg3 asm("4") = arg3; \ 475 register type3 __arg3 asm("4") = arg3; \
526 register type4 __arg4 asm("5") = arg4; \ 476 register type4 __arg4 asm("5") = arg4; \
527 register type5 __arg5 asm("6") = arg5; \ 477 register type5 __arg5 asm("6") = arg5; \
528 register long __svcres asm("2"); \ 478 register long __svcres asm("2"); \
529 long __res; \ 479 long __res; \
530 __asm__ __volatile__ ( \ 480 asm volatile( \
531 " .if %1 < 256\n" \ 481 " .if %1 < 256\n" \
532 " svc %b1\n" \ 482 " svc %b1\n" \
533 " .else\n" \ 483 " .else\n" \
534 " la %%r1,%1\n" \ 484 " la %%r1,%1\n" \
535 " svc 0\n" \ 485 " svc 0\n" \
536 " .endif" \ 486 " .endif" \
537 : "=d" (__svcres) \ 487 : "=d" (__svcres) \
538 : "i" (__NR_##name), \ 488 : "i" (__NR_##name), \
539 "0" (__arg1), \ 489 "0" (__arg1), \
540 "d" (__arg2), \ 490 "d" (__arg2), \
541 "d" (__arg3), \ 491 "d" (__arg3), \
542 "d" (__arg4), \ 492 "d" (__arg4), \
543 "d" (__arg5) \ 493 "d" (__arg5) \
544 : _svc_clobber ); \ 494 : _svc_clobber); \
545 __res = __svcres; \ 495 __res = __svcres; \
546 __syscall_return(type,__res); \ 496 __syscall_return(type,__res); \
547} 497}
548 498
549#define __ARCH_WANT_IPC_PARSE_VERSION 499#define __ARCH_WANT_IPC_PARSE_VERSION
diff --git a/include/asm-s390/z90crypt.h b/include/asm-s390/z90crypt.h
deleted file mode 100644
index 31a2439b07bd..000000000000
--- a/include/asm-s390/z90crypt.h
+++ /dev/null
@@ -1,212 +0,0 @@
1/*
2 * include/asm-s390/z90crypt.h
3 *
4 * z90crypt 1.3.3 (user-visible header)
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef __ASM_S390_Z90CRYPT_H
28#define __ASM_S390_Z90CRYPT_H
29#include <linux/ioctl.h>
30
31#define z90crypt_VERSION 1
32#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
33#define z90crypt_VARIANT 3 // 3 = CEX2A support
34
35/**
36 * struct ica_rsa_modexpo
37 *
38 * Requirements:
39 * - outputdatalength is at least as large as inputdatalength.
40 * - All key parts are right justified in their fields, padded on
41 * the left with zeroes.
42 * - length(b_key) = inputdatalength
43 * - length(n_modulus) = inputdatalength
44 */
45struct ica_rsa_modexpo {
46 char __user * inputdata;
47 unsigned int inputdatalength;
48 char __user * outputdata;
49 unsigned int outputdatalength;
50 char __user * b_key;
51 char __user * n_modulus;
52};
53
54/**
55 * struct ica_rsa_modexpo_crt
56 *
57 * Requirements:
58 * - inputdatalength is even.
59 * - outputdatalength is at least as large as inputdatalength.
60 * - All key parts are right justified in their fields, padded on
61 * the left with zeroes.
62 * - length(bp_key) = inputdatalength/2 + 8
63 * - length(bq_key) = inputdatalength/2
64 * - length(np_key) = inputdatalength/2 + 8
65 * - length(nq_key) = inputdatalength/2
66 * - length(u_mult_inv) = inputdatalength/2 + 8
67 */
68struct ica_rsa_modexpo_crt {
69 char __user * inputdata;
70 unsigned int inputdatalength;
71 char __user * outputdata;
72 unsigned int outputdatalength;
73 char __user * bp_key;
74 char __user * bq_key;
75 char __user * np_prime;
76 char __user * nq_prime;
77 char __user * u_mult_inv;
78};
79
80#define Z90_IOCTL_MAGIC 'z' // NOTE: Need to allocate from linux folks
81
82/**
83 * Interface notes:
84 *
85 * The ioctl()s which are implemented (along with relevant details)
86 * are:
87 *
88 * ICARSAMODEXPO
89 * Perform an RSA operation using a Modulus-Exponent pair
90 * This takes an ica_rsa_modexpo struct as its arg.
91 *
92 * NOTE: please refer to the comments preceding this structure
93 * for the implementation details for the contents of the
94 * block
95 *
96 * ICARSACRT
97 * Perform an RSA operation using a Chinese-Remainder Theorem key
98 * This takes an ica_rsa_modexpo_crt struct as its arg.
99 *
100 * NOTE: please refer to the comments preceding this structure
101 * for the implementation details for the contents of the
102 * block
103 *
104 * Z90STAT_TOTALCOUNT
105 * Return an integer count of all device types together.
106 *
107 * Z90STAT_PCICACOUNT
108 * Return an integer count of all PCICAs.
109 *
110 * Z90STAT_PCICCCOUNT
111 * Return an integer count of all PCICCs.
112 *
113 * Z90STAT_PCIXCCMCL2COUNT
114 * Return an integer count of all MCL2 PCIXCCs.
115 *
116 * Z90STAT_PCIXCCMCL3COUNT
117 * Return an integer count of all MCL3 PCIXCCs.
118 *
119 * Z90STAT_CEX2CCOUNT
120 * Return an integer count of all CEX2Cs.
121 *
122 * Z90STAT_CEX2ACOUNT
123 * Return an integer count of all CEX2As.
124 *
125 * Z90STAT_REQUESTQ_COUNT
126 * Return an integer count of the number of entries waiting to be
127 * sent to a device.
128 *
129 * Z90STAT_PENDINGQ_COUNT
130 * Return an integer count of the number of entries sent to a
131 * device awaiting the reply.
132 *
133 * Z90STAT_TOTALOPEN_COUNT
134 * Return an integer count of the number of open file handles.
135 *
136 * Z90STAT_DOMAIN_INDEX
137 * Return the integer value of the Cryptographic Domain.
138 *
139 * Z90STAT_STATUS_MASK
140 * Return an 64 element array of unsigned chars for the status of
141 * all devices.
142 * 0x01: PCICA
143 * 0x02: PCICC
144 * 0x03: PCIXCC_MCL2
145 * 0x04: PCIXCC_MCL3
146 * 0x05: CEX2C
147 * 0x06: CEX2A
148 * 0x0d: device is disabled via the proc filesystem
149 *
150 * Z90STAT_QDEPTH_MASK
151 * Return an 64 element array of unsigned chars for the queue
152 * depth of all devices.
153 *
154 * Z90STAT_PERDEV_REQCNT
155 * Return an 64 element array of unsigned integers for the number
156 * of successfully completed requests per device since the device
157 * was detected and made available.
158 *
159 * ICAZ90STATUS (deprecated)
160 * Return some device driver status in a ica_z90_status struct
161 * This takes an ica_z90_status struct as its arg.
162 *
163 * NOTE: this ioctl() is deprecated, and has been replaced with
164 * single ioctl()s for each type of status being requested
165 *
166 * Z90STAT_PCIXCCCOUNT (deprecated)
167 * Return an integer count of all PCIXCCs (MCL2 + MCL3).
168 * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
169 * MCL2 PCIXCCs.
170 *
171 * Z90QUIESCE (not recommended)
172 * Quiesce the driver. This is intended to stop all new
173 * requests from being processed. Its use is NOT recommended,
174 * except in circumstances where there is no other way to stop
175 * callers from accessing the driver. Its original use was to
176 * allow the driver to be "drained" of work in preparation for
177 * a system shutdown.
178 *
179 * NOTE: once issued, this ban on new work cannot be undone
180 * except by unloading and reloading the driver.
181 */
182
183/**
184 * Supported ioctl calls
185 */
186#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x05, 0)
187#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x06, 0)
188
189/* DEPRECATED status calls (bound for removal at some point) */
190#define ICAZ90STATUS _IOR(Z90_IOCTL_MAGIC, 0x10, struct ica_z90_status)
191#define Z90STAT_PCIXCCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x43, int)
192
193/* unrelated to ICA callers */
194#define Z90QUIESCE _IO(Z90_IOCTL_MAGIC, 0x11)
195
196/* New status calls */
197#define Z90STAT_TOTALCOUNT _IOR(Z90_IOCTL_MAGIC, 0x40, int)
198#define Z90STAT_PCICACOUNT _IOR(Z90_IOCTL_MAGIC, 0x41, int)
199#define Z90STAT_PCICCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x42, int)
200#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
201#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
202#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
203#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int)
204#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
205#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
206#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
207#define Z90STAT_DOMAIN_INDEX _IOR(Z90_IOCTL_MAGIC, 0x47, int)
208#define Z90STAT_STATUS_MASK _IOR(Z90_IOCTL_MAGIC, 0x48, char[64])
209#define Z90STAT_QDEPTH_MASK _IOR(Z90_IOCTL_MAGIC, 0x49, char[64])
210#define Z90STAT_PERDEV_REQCNT _IOR(Z90_IOCTL_MAGIC, 0x4a, int[64])
211
212#endif /* __ASM_S390_Z90CRYPT_H */
diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h
new file mode 100644
index 000000000000..7244c68464f2
--- /dev/null
+++ b/include/asm-s390/zcrypt.h
@@ -0,0 +1,285 @@
1/*
2 * include/asm-s390/zcrypt.h
3 *
4 * zcrypt 2.1.0 (user-visible header)
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef __ASM_S390_ZCRYPT_H
28#define __ASM_S390_ZCRYPT_H
29
30#define ZCRYPT_VERSION 2
31#define ZCRYPT_RELEASE 1
32#define ZCRYPT_VARIANT 0
33
34#include <linux/ioctl.h>
35#include <linux/compiler.h>
36
37/**
38 * struct ica_rsa_modexpo
39 *
40 * Requirements:
41 * - outputdatalength is at least as large as inputdatalength.
42 * - All key parts are right justified in their fields, padded on
43 * the left with zeroes.
44 * - length(b_key) = inputdatalength
45 * - length(n_modulus) = inputdatalength
46 */
47struct ica_rsa_modexpo {
48 char __user * inputdata;
49 unsigned int inputdatalength;
50 char __user * outputdata;
51 unsigned int outputdatalength;
52 char __user * b_key;
53 char __user * n_modulus;
54};
55
56/**
57 * struct ica_rsa_modexpo_crt
58 *
59 * Requirements:
60 * - inputdatalength is even.
61 * - outputdatalength is at least as large as inputdatalength.
62 * - All key parts are right justified in their fields, padded on
63 * the left with zeroes.
64 * - length(bp_key) = inputdatalength/2 + 8
65 * - length(bq_key) = inputdatalength/2
66 * - length(np_key) = inputdatalength/2 + 8
67 * - length(nq_key) = inputdatalength/2
68 * - length(u_mult_inv) = inputdatalength/2 + 8
69 */
70struct ica_rsa_modexpo_crt {
71 char __user * inputdata;
72 unsigned int inputdatalength;
73 char __user * outputdata;
74 unsigned int outputdatalength;
75 char __user * bp_key;
76 char __user * bq_key;
77 char __user * np_prime;
78 char __user * nq_prime;
79 char __user * u_mult_inv;
80};
81
82/**
83 * CPRBX
84 * Note that all shorts and ints are big-endian.
85 * All pointer fields are 16 bytes long, and mean nothing.
86 *
87 * A request CPRB is followed by a request_parameter_block.
88 *
89 * The request (or reply) parameter block is organized thus:
90 * function code
91 * VUD block
92 * key block
93 */
94struct ica_CPRBX {
95 unsigned short cprb_len; /* CPRB length 220 */
96 unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
97 unsigned char pad_000[3]; /* Alignment pad bytes */
98 unsigned char func_id[2]; /* function id 0x5432 */
99 unsigned char cprb_flags[4]; /* Flags */
100 unsigned int req_parml; /* request parameter buffer len */
101 unsigned int req_datal; /* request data buffer */
102 unsigned int rpl_msgbl; /* reply message block length */
103 unsigned int rpld_parml; /* replied parameter block len */
104 unsigned int rpl_datal; /* reply data block len */
105 unsigned int rpld_datal; /* replied data block len */
106 unsigned int req_extbl; /* request extension block len */
107 unsigned char pad_001[4]; /* reserved */
108 unsigned int rpld_extbl; /* replied extension block len */
109 unsigned char padx000[16 - sizeof (char *)];
110 unsigned char * req_parmb; /* request parm block 'address' */
111 unsigned char padx001[16 - sizeof (char *)];
112 unsigned char * req_datab; /* request data block 'address' */
113 unsigned char padx002[16 - sizeof (char *)];
114 unsigned char * rpl_parmb; /* reply parm block 'address' */
115 unsigned char padx003[16 - sizeof (char *)];
116 unsigned char * rpl_datab; /* reply data block 'address' */
117 unsigned char padx004[16 - sizeof (char *)];
118 unsigned char * req_extb; /* request extension block 'addr'*/
119 unsigned char padx005[16 - sizeof (char *)];
120 unsigned char * rpl_extb; /* reply extension block 'addres'*/
121 unsigned short ccp_rtcode; /* server return code */
122 unsigned short ccp_rscode; /* server reason code */
123 unsigned int mac_data_len; /* Mac Data Length */
124 unsigned char logon_id[8]; /* Logon Identifier */
125 unsigned char mac_value[8]; /* Mac Value */
126 unsigned char mac_content_flgs;/* Mac content flag byte */
127 unsigned char pad_002; /* Alignment */
128 unsigned short domain; /* Domain */
129 unsigned char usage_domain[4];/* Usage domain */
130 unsigned char cntrl_domain[4];/* Control domain */
131 unsigned char S390enf_mask[4];/* S/390 enforcement mask */
132 unsigned char pad_004[36]; /* reserved */
133};
134
135/**
136 * xcRB
137 */
138struct ica_xcRB {
139 unsigned short agent_ID;
140 unsigned int user_defined;
141 unsigned short request_ID;
142 unsigned int request_control_blk_length;
143 unsigned char padding1[16 - sizeof (char *)];
144 char __user * request_control_blk_addr;
145 unsigned int request_data_length;
146 char padding2[16 - sizeof (char *)];
147 char __user * request_data_address;
148 unsigned int reply_control_blk_length;
149 char padding3[16 - sizeof (char *)];
150 char __user * reply_control_blk_addr;
151 unsigned int reply_data_length;
152 char padding4[16 - sizeof (char *)];
153 char __user * reply_data_addr;
154 unsigned short priority_window;
155 unsigned int status;
156} __attribute__((packed));
157#define AUTOSELECT ((unsigned int)0xFFFFFFFF)
158
159#define ZCRYPT_IOCTL_MAGIC 'z'
160
161/**
162 * Interface notes:
163 *
164 * The ioctl()s which are implemented (along with relevant details)
165 * are:
166 *
167 * ICARSAMODEXPO
168 * Perform an RSA operation using a Modulus-Exponent pair
169 * This takes an ica_rsa_modexpo struct as its arg.
170 *
171 * NOTE: please refer to the comments preceding this structure
172 * for the implementation details for the contents of the
173 * block
174 *
175 * ICARSACRT
176 * Perform an RSA operation using a Chinese-Remainder Theorem key
177 * This takes an ica_rsa_modexpo_crt struct as its arg.
178 *
179 * NOTE: please refer to the comments preceding this structure
180 * for the implementation details for the contents of the
181 * block
182 *
183 * Z90STAT_TOTALCOUNT
184 * Return an integer count of all device types together.
185 *
186 * Z90STAT_PCICACOUNT
187 * Return an integer count of all PCICAs.
188 *
189 * Z90STAT_PCICCCOUNT
190 * Return an integer count of all PCICCs.
191 *
192 * Z90STAT_PCIXCCMCL2COUNT
193 * Return an integer count of all MCL2 PCIXCCs.
194 *
195 * Z90STAT_PCIXCCMCL3COUNT
196 * Return an integer count of all MCL3 PCIXCCs.
197 *
198 * Z90STAT_CEX2CCOUNT
199 * Return an integer count of all CEX2Cs.
200 *
201 * Z90STAT_CEX2ACOUNT
202 * Return an integer count of all CEX2As.
203 *
204 * Z90STAT_REQUESTQ_COUNT
205 * Return an integer count of the number of entries waiting to be
206 * sent to a device.
207 *
208 * Z90STAT_PENDINGQ_COUNT
209 * Return an integer count of the number of entries sent to a
210 * device awaiting the reply.
211 *
212 * Z90STAT_TOTALOPEN_COUNT
213 * Return an integer count of the number of open file handles.
214 *
215 * Z90STAT_DOMAIN_INDEX
216 * Return the integer value of the Cryptographic Domain.
217 *
218 * Z90STAT_STATUS_MASK
 *   Return a 64-element array of unsigned chars for the status of
220 * all devices.
221 * 0x01: PCICA
222 * 0x02: PCICC
223 * 0x03: PCIXCC_MCL2
224 * 0x04: PCIXCC_MCL3
225 * 0x05: CEX2C
226 * 0x06: CEX2A
227 * 0x0d: device is disabled via the proc filesystem
228 *
229 * Z90STAT_QDEPTH_MASK
 *   Return a 64-element array of unsigned chars for the queue
231 * depth of all devices.
232 *
233 * Z90STAT_PERDEV_REQCNT
 *   Return a 64-element array of unsigned integers for the number
235 * of successfully completed requests per device since the device
236 * was detected and made available.
237 *
238 * ICAZ90STATUS (deprecated)
239 * Return some device driver status in a ica_z90_status struct
240 * This takes an ica_z90_status struct as its arg.
241 *
242 * NOTE: this ioctl() is deprecated, and has been replaced with
243 * single ioctl()s for each type of status being requested
244 *
245 * Z90STAT_PCIXCCCOUNT (deprecated)
246 * Return an integer count of all PCIXCCs (MCL2 + MCL3).
247 * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
248 * MCL2 PCIXCCs.
249 *
250 * Z90QUIESCE (not recommended)
251 * Quiesce the driver. This is intended to stop all new
252 * requests from being processed. Its use is NOT recommended,
253 * except in circumstances where there is no other way to stop
254 * callers from accessing the driver. Its original use was to
255 * allow the driver to be "drained" of work in preparation for
256 * a system shutdown.
257 *
258 * NOTE: once issued, this ban on new work cannot be undone
259 * except by unloading and reloading the driver.
260 */
261
/**
 * Supported ioctl calls.
 *
 * Semantics of each call are described in the interface notes earlier in
 * this file. These three are encoded with raw _IOC() and a size of 0, so
 * the argument size is not part of the ioctl number (NOTE(review):
 * presumably kept that way for binary compatibility — confirm).
 */
#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0)
#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
/* Send an extended request block; presumably takes a struct ica_xcRB. */
#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
268
/*
 * New status calls — one ioctl per statistic; see the interface notes
 * earlier in this file for what each returns. The command numbers are not
 * contiguous (0x4b-0x4e sit apart from 0x40-0x4a); NOTE(review): presumably
 * the later device types were added after the original range was assigned —
 * these values are ABI and must never be renumbered.
 */
#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
#define Z90STAT_PCICACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x41, int)
#define Z90STAT_PCICCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x42, int)
#define Z90STAT_PCIXCCMCL2COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4b, int)
#define Z90STAT_PCIXCCMCL3COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4c, int)
#define Z90STAT_CEX2CCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4d, int)
#define Z90STAT_CEX2ACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4e, int)
#define Z90STAT_REQUESTQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x44, int)
#define Z90STAT_PENDINGQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x45, int)
#define Z90STAT_TOTALOPEN_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x46, int)
#define Z90STAT_DOMAIN_INDEX _IOR(ZCRYPT_IOCTL_MAGIC, 0x47, int)
#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64])
#define Z90STAT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x49, char[64])
#define Z90STAT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4a, int[64])
284
285#endif /* __ASM_S390_ZCRYPT_H */