author		Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 11:19:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 11:19:39 -0400
commit		d0b8883800c913f5cc0eb273c052bcac94ad44d8 (patch)
tree		2c9610d6df3545beb916238b314466e6d0e74297 /arch/s390/include
parent		9d2da7af909e1cf529f3cac582aaae05b107aa1e (diff)
parent		1c21351b722c9101bacdb961f5b5711669c882a0 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 update from Martin Schwidefsky:
 "This is the first batch of s390 patches for the 3.10 merge window.
  Included are some performance enhancements: storage key initialization,
  zero page cache synonyms, system call micro optimization and the
  speedup patches for dasdfmt.  Sebastian managed to get rid of the
  special casing for the console device in the cio layer.  And the usual
  bunch of bug fixes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (59 commits)
  s390/pci: use pci_scan_root_bus
  s390/scm_blk: fix memleak in init function
  s390/scm_blk: allow more cluster size values
  s390/cio: fix irq statistics
  s390/memory hotplug: prevent offline of active memory increments
  s390: remove small stack config option
  s390: system call path micro optimization
  s390: lowcore stack pointer offsets
  s390/uapi: change struct statfs[64] member types to unsigned values
  s390/pci: return correct dma address for offset > PAGE_SIZE
  s390/ptrace: remove empty ifdefs
  s390/compat: remove ptrace compat definitions from uapi header file
  s390/compat: fix compile error for !COMPAT
  s390/compat: fix compat_sys_statfs() memory corruption
  s390/zcore: Fix HSA copy length for last block
  s390/mm,gmap: segment mapping race
  s390/mm,gmap: implement gmap_translate()
  s390/pci: remove disable_device implementation
  s390/pci: disable per default
  s390/pci: return error after failed pci ops
  ...
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/bitops.h       | 117
-rw-r--r--  arch/s390/include/asm/ccwdev.h       |   3
-rw-r--r--  arch/s390/include/asm/cio.h          |   2
-rw-r--r--  arch/s390/include/asm/compat.h       |  57
-rw-r--r--  arch/s390/include/asm/elf.h          |  23
-rw-r--r--  arch/s390/include/asm/pci.h          |   1
-rw-r--r--  arch/s390/include/asm/pci_debug.h    |   9
-rw-r--r--  arch/s390/include/asm/pci_insn.h     | 203
-rw-r--r--  arch/s390/include/asm/pci_io.h       |  16
-rw-r--r--  arch/s390/include/asm/pgtable.h      |   2
-rw-r--r--  arch/s390/include/asm/processor.h    |   3
-rw-r--r--  arch/s390/include/asm/ptrace.h       |   6
-rw-r--r--  arch/s390/include/asm/syscall.h      |   1
-rw-r--r--  arch/s390/include/asm/thread_info.h  |   6
-rw-r--r--  arch/s390/include/uapi/asm/ptrace.h  |  20
-rw-r--r--  arch/s390/include/uapi/asm/statfs.h  |  63
16 files changed, 170 insertions, 362 deletions
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 15422933c60b..4d8604e311f3 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,8 +61,6 @@ extern const char _sb_findmap[];
61 61
62#ifndef CONFIG_64BIT 62#ifndef CONFIG_64BIT
63 63
64#define __BITOPS_ALIGN 3
65#define __BITOPS_WORDSIZE 32
66#define __BITOPS_OR "or" 64#define __BITOPS_OR "or"
67#define __BITOPS_AND "nr" 65#define __BITOPS_AND "nr"
68#define __BITOPS_XOR "xr" 66#define __BITOPS_XOR "xr"
@@ -81,8 +79,6 @@ extern const char _sb_findmap[];
81 79
82#else /* CONFIG_64BIT */ 80#else /* CONFIG_64BIT */
83 81
84#define __BITOPS_ALIGN 7
85#define __BITOPS_WORDSIZE 64
86#define __BITOPS_OR "ogr" 82#define __BITOPS_OR "ogr"
87#define __BITOPS_AND "ngr" 83#define __BITOPS_AND "ngr"
88#define __BITOPS_XOR "xgr" 84#define __BITOPS_XOR "xgr"
@@ -101,8 +97,7 @@ extern const char _sb_findmap[];
101 97
102#endif /* CONFIG_64BIT */ 98#endif /* CONFIG_64BIT */
103 99
104#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 100#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
105#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
106 101
107#ifdef CONFIG_SMP 102#ifdef CONFIG_SMP
108/* 103/*
@@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
114 109
115 addr = (unsigned long) ptr; 110 addr = (unsigned long) ptr;
116 /* calculate address for CS */ 111 /* calculate address for CS */
117 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 112 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
118 /* make OR mask */ 113 /* make OR mask */
119 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 114 mask = 1UL << (nr & (BITS_PER_LONG - 1));
120 /* Do the atomic update. */ 115 /* Do the atomic update. */
121 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); 116 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
122} 117}
@@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
130 125
131 addr = (unsigned long) ptr; 126 addr = (unsigned long) ptr;
132 /* calculate address for CS */ 127 /* calculate address for CS */
133 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 128 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
134 /* make AND mask */ 129 /* make AND mask */
135 mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); 130 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
136 /* Do the atomic update. */ 131 /* Do the atomic update. */
137 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); 132 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
138} 133}
@@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
146 141
147 addr = (unsigned long) ptr; 142 addr = (unsigned long) ptr;
148 /* calculate address for CS */ 143 /* calculate address for CS */
149 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 144 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
150 /* make XOR mask */ 145 /* make XOR mask */
151 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 146 mask = 1UL << (nr & (BITS_PER_LONG - 1));
152 /* Do the atomic update. */ 147 /* Do the atomic update. */
153 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); 148 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
154} 149}
@@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
163 158
164 addr = (unsigned long) ptr; 159 addr = (unsigned long) ptr;
165 /* calculate address for CS */ 160 /* calculate address for CS */
166 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 161 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
167 /* make OR/test mask */ 162 /* make OR/test mask */
168 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 163 mask = 1UL << (nr & (BITS_PER_LONG - 1));
169 /* Do the atomic update. */ 164 /* Do the atomic update. */
170 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); 165 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
171 __BITOPS_BARRIER(); 166 barrier();
172 return (old & mask) != 0; 167 return (old & mask) != 0;
173} 168}
174 169
@@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
182 177
183 addr = (unsigned long) ptr; 178 addr = (unsigned long) ptr;
184 /* calculate address for CS */ 179 /* calculate address for CS */
185 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 180 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
186 /* make AND/test mask */ 181 /* make AND/test mask */
187 mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); 182 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
188 /* Do the atomic update. */ 183 /* Do the atomic update. */
189 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); 184 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
190 __BITOPS_BARRIER(); 185 barrier();
191 return (old ^ new) != 0; 186 return (old ^ new) != 0;
192} 187}
193 188
@@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
201 196
202 addr = (unsigned long) ptr; 197 addr = (unsigned long) ptr;
203 /* calculate address for CS */ 198 /* calculate address for CS */
204 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 199 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
205 /* make XOR/test mask */ 200 /* make XOR/test mask */
206 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 201 mask = 1UL << (nr & (BITS_PER_LONG - 1));
207 /* Do the atomic update. */ 202 /* Do the atomic update. */
208 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); 203 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
209 __BITOPS_BARRIER(); 204 barrier();
210 return (old & mask) != 0; 205 return (old & mask) != 0;
211} 206}
212#endif /* CONFIG_SMP */ 207#endif /* CONFIG_SMP */
@@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
218{ 213{
219 unsigned long addr; 214 unsigned long addr;
220 215
221 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 216 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
222 asm volatile( 217 asm volatile(
223 " oc %O0(1,%R0),%1" 218 " oc %O0(1,%R0),%1"
224 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 219 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
229{ 224{
230 unsigned long addr; 225 unsigned long addr;
231 226
232 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 227 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
233 *(unsigned char *) addr |= 1 << (nr & 7); 228 *(unsigned char *) addr |= 1 << (nr & 7);
234} 229}
235 230
@@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
246{ 241{
247 unsigned long addr; 242 unsigned long addr;
248 243
249 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 244 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
250 asm volatile( 245 asm volatile(
251 " nc %O0(1,%R0),%1" 246 " nc %O0(1,%R0),%1"
252 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); 247 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
@@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
257{ 252{
258 unsigned long addr; 253 unsigned long addr;
259 254
260 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 255 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
261 *(unsigned char *) addr &= ~(1 << (nr & 7)); 256 *(unsigned char *) addr &= ~(1 << (nr & 7));
262} 257}
263 258
@@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
273{ 268{
274 unsigned long addr; 269 unsigned long addr;
275 270
276 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 271 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
277 asm volatile( 272 asm volatile(
278 " xc %O0(1,%R0),%1" 273 " xc %O0(1,%R0),%1"
279 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 274 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
284{ 279{
285 unsigned long addr; 280 unsigned long addr;
286 281
287 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 282 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
288 *(unsigned char *) addr ^= 1 << (nr & 7); 283 *(unsigned char *) addr ^= 1 << (nr & 7);
289} 284}
290 285
@@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
302 unsigned long addr; 297 unsigned long addr;
303 unsigned char ch; 298 unsigned char ch;
304 299
305 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 300 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
306 ch = *(unsigned char *) addr; 301 ch = *(unsigned char *) addr;
307 asm volatile( 302 asm volatile(
308 " oc %O0(1,%R0),%1" 303 " oc %O0(1,%R0),%1"
@@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
321 unsigned long addr; 316 unsigned long addr;
322 unsigned char ch; 317 unsigned char ch;
323 318
324 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 319 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
325 ch = *(unsigned char *) addr; 320 ch = *(unsigned char *) addr;
326 asm volatile( 321 asm volatile(
327 " nc %O0(1,%R0),%1" 322 " nc %O0(1,%R0),%1"
@@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
340 unsigned long addr; 335 unsigned long addr;
341 unsigned char ch; 336 unsigned char ch;
342 337
343 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 338 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
344 ch = *(unsigned char *) addr; 339 ch = *(unsigned char *) addr;
345 asm volatile( 340 asm volatile(
346 " xc %O0(1,%R0),%1" 341 " xc %O0(1,%R0),%1"
@@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
376 unsigned long addr; 371 unsigned long addr;
377 unsigned char ch; 372 unsigned char ch;
378 373
379 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 374 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
380 ch = *(volatile unsigned char *) addr; 375 ch = *(volatile unsigned char *) addr;
381 return (ch >> (nr & 7)) & 1; 376 return (ch >> (nr & 7)) & 1;
382} 377}
@@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
384static inline int 379static inline int
385__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { 380__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
386 return (((volatile char *) addr) 381 return (((volatile char *) addr)
387 [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; 382 [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
388} 383}
389 384
390#define test_bit(nr,addr) \ 385#define test_bit(nr,addr) \
@@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr,
693 688
694 if (offset >= size) 689 if (offset >= size)
695 return size; 690 return size;
696 bit = offset & (__BITOPS_WORDSIZE - 1); 691 bit = offset & (BITS_PER_LONG - 1);
697 offset -= bit; 692 offset -= bit;
698 size -= offset; 693 size -= offset;
699 p = addr + offset / __BITOPS_WORDSIZE; 694 p = addr + offset / BITS_PER_LONG;
700 if (bit) { 695 if (bit) {
701 set = __flo_word(0, *p & (~0UL << bit)); 696 set = __flo_word(0, *p & (~0UL << bit));
702 if (set >= size) 697 if (set >= size)
703 return size + offset; 698 return size + offset;
704 if (set < __BITOPS_WORDSIZE) 699 if (set < BITS_PER_LONG)
705 return set + offset; 700 return set + offset;
706 offset += __BITOPS_WORDSIZE; 701 offset += BITS_PER_LONG;
707 size -= __BITOPS_WORDSIZE; 702 size -= BITS_PER_LONG;
708 p++; 703 p++;
709 } 704 }
710 return offset + find_first_bit_left(p, size); 705 return offset + find_first_bit_left(p, size);
@@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr,
736 731
737 if (offset >= size) 732 if (offset >= size)
738 return size; 733 return size;
739 bit = offset & (__BITOPS_WORDSIZE - 1); 734 bit = offset & (BITS_PER_LONG - 1);
740 offset -= bit; 735 offset -= bit;
741 size -= offset; 736 size -= offset;
742 p = addr + offset / __BITOPS_WORDSIZE; 737 p = addr + offset / BITS_PER_LONG;
743 if (bit) { 738 if (bit) {
744 /* 739 /*
745 * __ffz_word returns __BITOPS_WORDSIZE 740 * __ffz_word returns BITS_PER_LONG
746 * if no zero bit is present in the word. 741 * if no zero bit is present in the word.
747 */ 742 */
748 set = __ffz_word(bit, *p >> bit); 743 set = __ffz_word(bit, *p >> bit);
749 if (set >= size) 744 if (set >= size)
750 return size + offset; 745 return size + offset;
751 if (set < __BITOPS_WORDSIZE) 746 if (set < BITS_PER_LONG)
752 return set + offset; 747 return set + offset;
753 offset += __BITOPS_WORDSIZE; 748 offset += BITS_PER_LONG;
754 size -= __BITOPS_WORDSIZE; 749 size -= BITS_PER_LONG;
755 p++; 750 p++;
756 } 751 }
757 return offset + find_first_zero_bit(p, size); 752 return offset + find_first_zero_bit(p, size);
@@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr,
773 768
774 if (offset >= size) 769 if (offset >= size)
775 return size; 770 return size;
776 bit = offset & (__BITOPS_WORDSIZE - 1); 771 bit = offset & (BITS_PER_LONG - 1);
777 offset -= bit; 772 offset -= bit;
778 size -= offset; 773 size -= offset;
779 p = addr + offset / __BITOPS_WORDSIZE; 774 p = addr + offset / BITS_PER_LONG;
780 if (bit) { 775 if (bit) {
781 /* 776 /*
782 * __ffs_word returns __BITOPS_WORDSIZE 777 * __ffs_word returns BITS_PER_LONG
783 * if no one bit is present in the word. 778 * if no one bit is present in the word.
784 */ 779 */
785 set = __ffs_word(0, *p & (~0UL << bit)); 780 set = __ffs_word(0, *p & (~0UL << bit));
786 if (set >= size) 781 if (set >= size)
787 return size + offset; 782 return size + offset;
788 if (set < __BITOPS_WORDSIZE) 783 if (set < BITS_PER_LONG)
789 return set + offset; 784 return set + offset;
790 offset += __BITOPS_WORDSIZE; 785 offset += BITS_PER_LONG;
791 size -= __BITOPS_WORDSIZE; 786 size -= BITS_PER_LONG;
792 p++; 787 p++;
793 } 788 }
794 return offset + find_first_bit(p, size); 789 return offset + find_first_bit(p, size);
@@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
843 838
844 if (offset >= size) 839 if (offset >= size)
845 return size; 840 return size;
846 bit = offset & (__BITOPS_WORDSIZE - 1); 841 bit = offset & (BITS_PER_LONG - 1);
847 offset -= bit; 842 offset -= bit;
848 size -= offset; 843 size -= offset;
849 p = addr + offset / __BITOPS_WORDSIZE; 844 p = addr + offset / BITS_PER_LONG;
850 if (bit) { 845 if (bit) {
851 /* 846 /*
852 * s390 version of ffz returns __BITOPS_WORDSIZE 847 * s390 version of ffz returns BITS_PER_LONG
853 * if no zero bit is present in the word. 848 * if no zero bit is present in the word.
854 */ 849 */
855 set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); 850 set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
856 if (set >= size) 851 if (set >= size)
857 return size + offset; 852 return size + offset;
858 if (set < __BITOPS_WORDSIZE) 853 if (set < BITS_PER_LONG)
859 return set + offset; 854 return set + offset;
860 offset += __BITOPS_WORDSIZE; 855 offset += BITS_PER_LONG;
861 size -= __BITOPS_WORDSIZE; 856 size -= BITS_PER_LONG;
862 p++; 857 p++;
863 } 858 }
864 return offset + find_first_zero_bit_le(p, size); 859 return offset + find_first_zero_bit_le(p, size);
@@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
885 880
886 if (offset >= size) 881 if (offset >= size)
887 return size; 882 return size;
888 bit = offset & (__BITOPS_WORDSIZE - 1); 883 bit = offset & (BITS_PER_LONG - 1);
889 offset -= bit; 884 offset -= bit;
890 size -= offset; 885 size -= offset;
891 p = addr + offset / __BITOPS_WORDSIZE; 886 p = addr + offset / BITS_PER_LONG;
892 if (bit) { 887 if (bit) {
893 /* 888 /*
894 * s390 version of ffz returns __BITOPS_WORDSIZE 889 * s390 version of ffz returns BITS_PER_LONG
895 * if no zero bit is present in the word. 890 * if no zero bit is present in the word.
896 */ 891 */
897 set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); 892 set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
898 if (set >= size) 893 if (set >= size)
899 return size + offset; 894 return size + offset;
900 if (set < __BITOPS_WORDSIZE) 895 if (set < BITS_PER_LONG)
901 return set + offset; 896 return set + offset;
902 offset += __BITOPS_WORDSIZE; 897 offset += BITS_PER_LONG;
903 size -= __BITOPS_WORDSIZE; 898 size -= BITS_PER_LONG;
904 p++; 899 p++;
905 } 900 }
906 return offset + find_first_bit_le(p, size); 901 return offset + find_first_bit_le(p, size);
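
The bitops.h hunks above drop the private __BITOPS_ALIGN/__BITOPS_WORDSIZE constants in favour of the generic BITS_PER_LONG. The arithmetic that keeps recurring in them splits a bit number into the byte offset of the word that holds it plus a mask inside that word. Below is a stand-alone sketch of that decomposition in plain user-space C; it is not part of the patch, BITOPS_WORDS and the variable names are illustrative, and it assumes a 64-bit long as on 64-bit s390.

#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITOPS_WORDS(bits)	(((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long nr = 77;	/* arbitrary bit number */

	/*
	 * Byte offset of the unsigned long containing bit 'nr':
	 * nr ^ (nr & (BITS_PER_LONG - 1)) rounds nr down to a word
	 * boundary, ">> 3" converts from bits to bytes.
	 */
	unsigned long byte_off = (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;

	/* mask selecting the bit inside that word */
	unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));

	printf("bit %lu: word byte offset %lu, mask 0x%lx\n",
	       nr, byte_off, mask);
	printf("100 bits need %lu words\n", (unsigned long) BITOPS_WORDS(100));
	return 0;
}

For nr = 77 this prints byte offset 8 and mask 0x2000, i.e. bit 13 of the second unsigned long, which is exactly what the CS-based helpers in the hunk compute before doing the atomic update.
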
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e6061617a50b..f201af8be580 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
 #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
 
 extern struct ccw_device *ccw_device_probe_console(void);
-extern int ccw_device_force_console(void);
+extern void ccw_device_wait_idle(struct ccw_device *);
+extern int ccw_device_force_console(struct ccw_device *);
 
 int ccw_device_siosl(struct ccw_device *);
 
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ad2b924167d7..ffb898961c8d 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
 	return 0;
 }
 
-extern void wait_cons_dev(void);
-
 extern void css_schedule_reprobe(void);
 
 extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index f8c6df6cd1f0..c1e7c646727c 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -70,6 +70,22 @@ typedef u32 compat_ulong_t;
 typedef u64	compat_u64;
 typedef u32	compat_uptr_t;
 
+typedef struct {
+	u32 mask;
+	u32 addr;
+} __aligned(8) psw_compat_t;
+
+typedef struct {
+	psw_compat_t psw;
+	u32 gprs[NUM_GPRS];
+	u32 acrs[NUM_ACRS];
+	u32 orig_gpr2;
+} s390_compat_regs;
+
+typedef struct {
+	u32 gprs_high[NUM_GPRS];
+} s390_compat_regs_high;
+
 struct compat_timespec {
 	compat_time_t	tv_sec;
 	s32		tv_nsec;
@@ -124,18 +140,33 @@ struct compat_flock64 {
 };
 
 struct compat_statfs {
-	s32		f_type;
-	s32		f_bsize;
-	s32		f_blocks;
-	s32		f_bfree;
-	s32		f_bavail;
-	s32		f_files;
-	s32		f_ffree;
+	u32		f_type;
+	u32		f_bsize;
+	u32		f_blocks;
+	u32		f_bfree;
+	u32		f_bavail;
+	u32		f_files;
+	u32		f_ffree;
+	compat_fsid_t	f_fsid;
+	u32		f_namelen;
+	u32		f_frsize;
+	u32		f_flags;
+	u32		f_spare[4];
+};
+
+struct compat_statfs64 {
+	u32		f_type;
+	u32		f_bsize;
+	u64		f_blocks;
+	u64		f_bfree;
+	u64		f_bavail;
+	u64		f_files;
+	u64		f_ffree;
 	compat_fsid_t	f_fsid;
-	s32		f_namelen;
-	s32		f_frsize;
-	s32		f_flags;
-	s32		f_spare[5];
+	u32		f_namelen;
+	u32		f_frsize;
+	u32		f_flags;
+	u32		f_spare[4];
 };
 
 #define COMPAT_RLIM_OLD_INFINITY	0x7fffffff
@@ -248,8 +279,6 @@ static inline int is_compat_task(void)
 	return is_32bit_task();
 }
 
-#endif
-
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	unsigned long stack;
@@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 	return (void __user *) (stack - len);
 }
 
+#endif
+
 struct compat_ipc64_perm {
 	compat_key_t key;
 	__compat_uid32_t uid;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1bfdf24b85a2..78f4f8711d58 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -119,6 +119,8 @@
  */
 
 #include <asm/ptrace.h>
+#include <asm/compat.h>
+#include <asm/syscall.h>
 #include <asm/user.h>
 
 typedef s390_fp_regs elf_fpregset_t;
@@ -180,18 +182,31 @@ extern unsigned long elf_hwcap;
 extern char elf_platform[];
 #define ELF_PLATFORM (elf_platform)
 
-#ifdef CONFIG_64BIT
+#ifndef CONFIG_COMPAT
+#define SET_PERSONALITY(ex) \
+do {								\
+	set_personality(PER_LINUX |				\
+		(current->personality & (~PER_MASK)));		\
+	current_thread_info()->sys_call_table = 		\
+		(unsigned long) &sys_call_table;		\
+} while (0)
+#else /* CONFIG_COMPAT */
 #define SET_PERSONALITY(ex)					\
 do {								\
 	if (personality(current->personality) != PER_LINUX32)	\
 		set_personality(PER_LINUX |			\
 			(current->personality & ~PER_MASK));	\
-	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
+	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) {		\
 		set_thread_flag(TIF_31BIT);			\
-	else							\
+		current_thread_info()->sys_call_table =		\
+			(unsigned long) &sys_call_table_emu;	\
+	} else {						\
 		clear_thread_flag(TIF_31BIT);			\
+		current_thread_info()->sys_call_table =		\
+			(unsigned long) &sys_call_table;	\
+	}							\
 } while (0)
-#endif /* CONFIG_64BIT */
+#endif /* CONFIG_COMPAT */
 
 #define STACK_RND_MASK	0x7ffUL
 
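
The SET_PERSONALITY() change above, together with the new sys_call_table field added to struct thread_info further down, caches the address of the right system call table per thread so the system call entry path can select the native or compat table with a single load instead of re-testing TIF_31BIT on every call (the "system call path micro optimization" from the pull message). The following is only a rough, hypothetical C sketch of that idea: the real dispatch lives in the assembler entry code, and, as the syscall.h comment later in this diff hints, the real table entries are unsigned int values rather than full function pointers.

/* Hypothetical sketch only -- names and types are simplified. */
typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long);

struct thread_info_sketch {
	unsigned long flags;
	unsigned long sys_call_table;	/* filled in by SET_PERSONALITY() */
};

static long dispatch_syscall(struct thread_info_sketch *ti, unsigned int nr,
			     unsigned long a1, unsigned long a2,
			     unsigned long a3)
{
	/* one load selects the native or the emulation (compat) table */
	syscall_fn *table = (syscall_fn *) ti->sys_call_table;

	return table[nr](a1, a2, a3);
}
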
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 05333b7f0469..6c1801235db9 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
 struct zpci_dev *zpci_alloc_device(void);
 int zpci_create_device(struct zpci_dev *);
 int zpci_enable_device(struct zpci_dev *);
+int zpci_disable_device(struct zpci_dev *);
 void zpci_stop_device(struct zpci_dev *);
 void zpci_free_device(struct zpci_dev *);
 int zpci_scan_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6bbec4265b6e..1ca5d1047c71 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id;
 extern debug_info_t *pci_debug_err_id;
 
 #ifdef CONFIG_PCI_DEBUG
-#define zpci_dbg(fmt, args...)						\
-	do {								\
-		if (pci_debug_msg_id->level >= 2)			\
-			debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
-	} while (0)
+#define zpci_dbg(imp, fmt, args...)				\
+	debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
 
 #else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(fmt, args...) do { } while (0)
+#define zpci_dbg(imp, fmt, args...) do { } while (0)
 #endif
 
 #define zpci_err(text...)						\
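
With the change above, zpci_dbg() takes the debug importance level as its first argument and hands it straight to debug_sprintf_event() instead of hard-coding level 2. A hypothetical call site (the fid variable and the message text are made up for illustration) would now look like:

	/* emit a level-3 entry into the pci_debug_msg_id debug feature */
	zpci_dbg(3, "add fid:%x\n", fid);
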
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 1486a98d5dad..e6a2bdd4d705 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -1,10 +1,6 @@
1#ifndef _ASM_S390_PCI_INSN_H 1#ifndef _ASM_S390_PCI_INSN_H
2#define _ASM_S390_PCI_INSN_H 2#define _ASM_S390_PCI_INSN_H
3 3
4#include <linux/delay.h>
5
6#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
7
8/* Load/Store status codes */ 4/* Load/Store status codes */
9#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 5#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
10#define ZPCI_PCI_ST_FUNC_IN_ERR 8 6#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -82,199 +78,12 @@ struct zpci_fib {
82 u64 reserved7; 78 u64 reserved7;
83} __packed; 79} __packed;
84 80
85/* Modify PCI Function Controls */
86static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
87{
88 u8 cc;
89
90 asm volatile (
91 " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
92 " ipm %[cc]\n"
93 " srl %[cc],28\n"
94 : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
95 : : "cc");
96 *status = req >> 24 & 0xff;
97 return cc;
98}
99
100static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
101{
102 u8 cc, status;
103
104 do {
105 cc = __mpcifc(req, fib, &status);
106 if (cc == 2)
107 msleep(ZPCI_INSN_BUSY_DELAY);
108 } while (cc == 2);
109
110 if (cc)
111 printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
112 __func__, cc, status);
113 return (cc) ? -EIO : 0;
114}
115
116/* Refresh PCI Translations */
117static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
118{
119 register u64 __addr asm("2") = addr;
120 register u64 __range asm("3") = range;
121 u8 cc;
122
123 asm volatile (
124 " .insn rre,0xb9d30000,%[fn],%[addr]\n"
125 " ipm %[cc]\n"
126 " srl %[cc],28\n"
127 : [cc] "=d" (cc), [fn] "+d" (fn)
128 : [addr] "d" (__addr), "d" (__range)
129 : "cc");
130 *status = fn >> 24 & 0xff;
131 return cc;
132}
133
134static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
135{
136 u8 cc, status;
137
138 do {
139 cc = __rpcit(fn, addr, range, &status);
140 if (cc == 2)
141 udelay(ZPCI_INSN_BUSY_DELAY);
142 } while (cc == 2);
143
144 if (cc)
145 printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
146 __func__, cc, status, addr, range);
147 return (cc) ? -EIO : 0;
148}
149
150/* Store PCI function controls */
151static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
152{
153 u64 fn = (u64) handle << 32 | space << 16;
154 u8 cc;
155
156 asm volatile (
157 " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
158 " ipm %[cc]\n"
159 " srl %[cc],28\n"
160 : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
161 : : "cc");
162 *status = fn >> 24 & 0xff;
163 return cc;
164}
165
166/* Set Interruption Controls */
167static inline void sic_instr(u16 ctl, char *unused, u8 isc)
168{
169 asm volatile (
170 " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
171 : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
172}
173
174/* PCI Load */
175static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
176{
177 register u64 __req asm("2") = req;
178 register u64 __offset asm("3") = offset;
179 u64 __data;
180 u8 cc;
181
182 asm volatile (
183 " .insn rre,0xb9d20000,%[data],%[req]\n"
184 " ipm %[cc]\n"
185 " srl %[cc],28\n"
186 : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
187 : "d" (__offset)
188 : "cc");
189 *status = __req >> 24 & 0xff;
190 *data = __data;
191 return cc;
192}
193
194static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
195{
196 u8 cc, status;
197
198 do {
199 cc = __pcilg(data, req, offset, &status);
200 if (cc == 2)
201 udelay(ZPCI_INSN_BUSY_DELAY);
202 } while (cc == 2);
203
204 if (cc) {
205 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
206 __func__, cc, status, req, offset);
207 /* TODO: on IO errors set data to 0xff...
208 * here or in users of pcilg (le conversion)?
209 */
210 }
211 return (cc) ? -EIO : 0;
212}
213
214/* PCI Store */
215static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
216{
217 register u64 __req asm("2") = req;
218 register u64 __offset asm("3") = offset;
219 u8 cc;
220
221 asm volatile (
222 " .insn rre,0xb9d00000,%[data],%[req]\n"
223 " ipm %[cc]\n"
224 " srl %[cc],28\n"
225 : [cc] "=d" (cc), [req] "+d" (__req)
226 : "d" (__offset), [data] "d" (data)
227 : "cc");
228 *status = __req >> 24 & 0xff;
229 return cc;
230}
231
232static inline int pcistg_instr(u64 data, u64 req, u64 offset)
233{
234 u8 cc, status;
235
236 do {
237 cc = __pcistg(data, req, offset, &status);
238 if (cc == 2)
239 udelay(ZPCI_INSN_BUSY_DELAY);
240 } while (cc == 2);
241
242 if (cc)
243 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
244 __func__, cc, status, req, offset);
245 return (cc) ? -EIO : 0;
246}
247
248/* PCI Store Block */
249static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
250{
251 u8 cc;
252
253 asm volatile (
254 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
255 " ipm %[cc]\n"
256 " srl %[cc],28\n"
257 : [cc] "=d" (cc), [req] "+d" (req)
258 : [offset] "d" (offset), [data] "Q" (*data)
259 : "cc");
260 *status = req >> 24 & 0xff;
261 return cc;
262}
263
264static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
265{
266 u8 cc, status;
267
268 do {
269 cc = __pcistb(data, req, offset, &status);
270 if (cc == 2)
271 udelay(ZPCI_INSN_BUSY_DELAY);
272 } while (cc == 2);
273 81
274 if (cc) 82int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
275 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", 83int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
276 __func__, cc, status, req, offset); 84int s390pci_load(u64 *data, u64 req, u64 offset);
277 return (cc) ? -EIO : 0; 85int s390pci_store(u64 data, u64 req, u64 offset);
278} 86int s390pci_store_block(const u64 *data, u64 req, u64 offset);
87void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
279 88
280#endif 89#endif
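
The pci_insn.h hunk above removes the inline wrappers around the PCI instructions (mpcifc, rpcit, pcilg, pcistg, pcistb) and leaves only declarations of out-of-line s390pci_* helpers. The retry discipline the removed inlines implemented, and which the out-of-line versions presumably keep, is: condition code 2 means the function is busy and the instruction is simply reissued; any other non-zero condition code becomes -EIO. A minimal sketch of that pattern follows; zpci_retry_insn and the issue callback are illustrative names, not kernel API.

#include <linux/errno.h>
#include <linux/types.h>

static inline int zpci_retry_insn(u8 (*issue)(u64 req, u8 *status), u64 req)
{
	u8 cc, status;

	do {
		/* cc == 2: busy, reissue the instruction */
		cc = issue(req, &status);
	} while (cc == 2);

	return cc ? -EIO : 0;
}
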
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 5fd81f31d6c7..83a9caa6ae53 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
 	u64 data;							\
 	int rc;								\
 									\
-	rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr));		\
+	rc = s390pci_load(&data, req, ZPCI_OFFSET(addr));		\
 	if (rc)								\
 		data = -1ULL;						\
 	return (RETTYPE) data;						\
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
 	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH);	\
 	u64 data = (VALTYPE) val;					\
 									\
-	pcistg_instr(data, req, ZPCI_OFFSET(addr));			\
+	s390pci_store(data, req, ZPCI_OFFSET(addr));			\
 }
 
 zpci_read(8, u64)
@@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
 		val = 0;		/* let FW report error */
 		break;
 	}
-	return pcistg_instr(val, req, offset);
+	return s390pci_store(val, req, offset);
 }
 
 static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
 {
 	u64 data;
-	u8 cc;
+	int cc;
+
+	cc = s390pci_load(&data, req, offset);
+	if (cc)
+		goto out;
 
-	cc = pcilg_instr(&data, req, offset);
 	switch (len) {
 	case 1:
 		*((u8 *) dst) = (u8) data;
@@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
 		*((u64 *) dst) = (u64) data;
 		break;
 	}
+out:
 	return cc;
 }
 
 static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
 {
-	return pcistb_instr(data, req, offset);
+	return s390pci_store_block(data, req, offset);
 }
 
 static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3cb47cf02530..4a64c0e5428f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -764,6 +764,8 @@ void gmap_disable(struct gmap *gmap);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		     unsigned long to, unsigned long length);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_translate(unsigned long address, struct gmap *);
+unsigned long gmap_translate(unsigned long address, struct gmap *);
 unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
 void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 94e749c90230..6b499870662f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
 
 extern void show_code(struct pt_regs *regs);
 extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]);
+extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
+			    unsigned int len);
 
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 3ee5da3bc10c..559512a455da 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,9 +9,7 @@
 #include <uapi/asm/ptrace.h>
 
 #ifndef __ASSEMBLY__
-#ifndef __s390x__
-#else /* __s390x__ */
-#endif /* __s390x__ */
+
 extern long psw_kernel_bits;
 extern long psw_user_bits;
 
@@ -77,8 +75,6 @@ struct per_struct_kernel {
 #define PER_CONTROL_SUSPENSION		0x00400000UL
 #define PER_CONTROL_ALTERATION		0x00200000UL
 
-#ifdef __s390x__
-#endif /* __s390x__ */
 /*
  * These are defined as per linux/ptrace.h, which see.
  */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fe7b99759e12..cd29d2f4e4f3 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -23,6 +23,7 @@
  * type here is what we want [need] for both 32 bit and 64 bit systems.
  */
 extern const unsigned int sys_call_table[];
+extern const unsigned int sys_call_table_emu[];
 
 static inline long syscall_get_nr(struct task_struct *task,
 				  struct pt_regs *regs)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 9e2cfe0349c3..eb5f64d26d06 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,13 +14,8 @@
 #define THREAD_ORDER 1
 #define ASYNC_ORDER  1
 #else /* CONFIG_64BIT */
-#ifndef __SMALL_STACK
 #define THREAD_ORDER 2
 #define ASYNC_ORDER  2
-#else
-#define THREAD_ORDER 1
-#define ASYNC_ORDER  1
-#endif
 #endif /* CONFIG_64BIT */
 
 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@@ -41,6 +36,7 @@ struct thread_info {
 	struct task_struct	*task;		/* main task structure */
 	struct exec_domain	*exec_domain;	/* execution domain */
 	unsigned long		flags;		/* low level flags */
+	unsigned long		sys_call_table;	/* System call table address */
 	unsigned int		cpu;		/* current CPU */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	struct restart_block	restart_block;
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index a5ca214b34fd..3aa9f1ec5b29 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -215,12 +215,6 @@ typedef struct
 	unsigned long addr;
 } __attribute__ ((aligned(8))) psw_t;
 
-typedef struct
-{
-	__u32	mask;
-	__u32	addr;
-} __attribute__ ((aligned(8))) psw_compat_t;
-
 #ifndef __s390x__
 
 #define PSW_MASK_PER		0x40000000UL
@@ -295,20 +289,6 @@ typedef struct
 	unsigned long orig_gpr2;
 } s390_regs;
 
-typedef struct
-{
-	psw_compat_t	psw;
-	__u32		gprs[NUM_GPRS];
-	__u32		acrs[NUM_ACRS];
-	__u32		orig_gpr2;
-} s390_compat_regs;
-
-typedef struct
-{
-	__u32		gprs_high[NUM_GPRS];
-} s390_compat_regs_high;
-
-
 /*
  * Now for the user space program event recording (trace) definitions.
  * The following structures are used only for the ptrace interface, don't
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index 5acca0a34c20..a61d538756f2 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -7,9 +7,6 @@
 #ifndef _S390_STATFS_H
 #define _S390_STATFS_H
 
-#ifndef __s390x__
-#include <asm-generic/statfs.h>
-#else
 /*
  * We can't use <asm-generic/statfs.h> because in 64-bit mode
  * we mix ints of different sizes in our struct statfs.
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t;
 #endif
 
 struct statfs {
-	int  f_type;
-	int  f_bsize;
-	long f_blocks;
-	long f_bfree;
-	long f_bavail;
-	long f_files;
-	long f_ffree;
+	unsigned int	f_type;
+	unsigned int	f_bsize;
+	unsigned long	f_blocks;
+	unsigned long	f_bfree;
+	unsigned long	f_bavail;
+	unsigned long	f_files;
+	unsigned long	f_ffree;
 	__kernel_fsid_t f_fsid;
-	int  f_namelen;
-	int  f_frsize;
-	int  f_flags;
-	int  f_spare[4];
+	unsigned int	f_namelen;
+	unsigned int	f_frsize;
+	unsigned int	f_flags;
+	unsigned int	f_spare[4];
 };
 
 struct statfs64 {
-	int  f_type;
-	int  f_bsize;
-	long f_blocks;
-	long f_bfree;
-	long f_bavail;
-	long f_files;
-	long f_ffree;
+	unsigned int	f_type;
+	unsigned int	f_bsize;
+	unsigned long	f_blocks;
+	unsigned long	f_bfree;
+	unsigned long	f_bavail;
+	unsigned long	f_files;
+	unsigned long	f_ffree;
 	__kernel_fsid_t f_fsid;
-	int  f_namelen;
-	int  f_frsize;
-	int  f_flags;
-	int  f_spare[4];
+	unsigned int	f_namelen;
+	unsigned int	f_frsize;
+	unsigned int	f_flags;
+	unsigned int	f_spare[4];
 };
 
-struct compat_statfs64 {
-	__u32	f_type;
-	__u32	f_bsize;
-	__u64	f_blocks;
-	__u64	f_bfree;
-	__u64	f_bavail;
-	__u64	f_files;
-	__u64	f_ffree;
-	__kernel_fsid_t f_fsid;
-	__u32	f_namelen;
-	__u32	f_frsize;
-	__u32	f_flags;
-	__u32	f_spare[4];
-};
-
-#endif /* __s390x__ */
 #endif