Diffstat (limited to 'arch/s390')
52 files changed, 1160 insertions, 1012 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index eb8fb629f00b..bda6ba6f3cf5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -375,19 +375,6 @@ config PACK_STACK | |||
375 | 375 | ||
376 | Say Y if you are unsure. | 376 | Say Y if you are unsure. |
377 | 377 | ||
378 | config SMALL_STACK | ||
379 | def_bool n | ||
380 | prompt "Use 8kb for kernel stack instead of 16kb" | ||
381 | depends on PACK_STACK && 64BIT && !LOCKDEP | ||
382 | help | ||
383 | If you say Y here and the compiler supports the -mkernel-backchain | ||
384 | option the kernel will use a smaller kernel stack size. The reduced | ||
385 | size is 8kb instead of 16kb. This allows to run more threads on a | ||
386 | system and reduces the pressure on the memory management for higher | ||
387 | order page allocations. | ||
388 | |||
389 | Say N if you are unsure. | ||
390 | |||
391 | config CHECK_STACK | 378 | config CHECK_STACK |
392 | def_bool y | 379 | def_bool y |
393 | prompt "Detect kernel stack overflow" | 380 | prompt "Detect kernel stack overflow" |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7e3ce78d4290..a7d68a467ce8 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls | |||
55 | ifeq ($(call cc-option-yn,-mkernel-backchain),y) | 55 | ifeq ($(call cc-option-yn,-mkernel-backchain),y) |
56 | cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK | 56 | cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK |
57 | aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK | 57 | aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK |
58 | cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK | ||
59 | aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK | ||
60 | ifdef CONFIG_SMALL_STACK | ||
61 | STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) ) | ||
62 | endif | ||
63 | endif | 58 | endif |
64 | 59 | ||
65 | # new style option for packed stacks | 60 | # new style option for packed stacks |
66 | ifeq ($(call cc-option-yn,-mpacked-stack),y) | 61 | ifeq ($(call cc-option-yn,-mpacked-stack),y) |
67 | cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK | 62 | cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK |
68 | aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK | 63 | aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK |
69 | cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK | ||
70 | aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK | ||
71 | ifdef CONFIG_SMALL_STACK | ||
72 | STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) ) | ||
73 | endif | ||
74 | endif | 64 | endif |
75 | 65 | ||
76 | ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) | 66 | ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) |
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 9fd4a40c6752..bb5dd496614f 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df) | |||
105 | int hypfs_dbfs_init(void) | 105 | int hypfs_dbfs_init(void) |
106 | { | 106 | { |
107 | dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); | 107 | dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); |
108 | if (IS_ERR(dbfs_dir)) | 108 | return PTR_RET(dbfs_dir); |
109 | return PTR_ERR(dbfs_dir); | ||
110 | return 0; | ||
111 | } | 109 | } |
112 | 110 | ||
113 | void hypfs_dbfs_exit(void) | 111 | void hypfs_dbfs_exit(void) |
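The hunk above collapses the open-coded IS_ERR()/PTR_ERR()/return 0 sequence into a single PTR_RET() call. For reference, PTR_RET() from <linux/err.h> (later renamed PTR_ERR_OR_ZERO) behaves roughly like this sketch:

	/* Rough equivalent of PTR_RET(), shown only to illustrate why the
	 * three-line error check collapses into one return statement. */
	static inline int PTR_RET(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}
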
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 15422933c60b..4d8604e311f3 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,8 +61,6 @@ extern const char _sb_findmap[]; | |||
61 | 61 | ||
62 | #ifndef CONFIG_64BIT | 62 | #ifndef CONFIG_64BIT |
63 | 63 | ||
64 | #define __BITOPS_ALIGN 3 | ||
65 | #define __BITOPS_WORDSIZE 32 | ||
66 | #define __BITOPS_OR "or" | 64 | #define __BITOPS_OR "or" |
67 | #define __BITOPS_AND "nr" | 65 | #define __BITOPS_AND "nr" |
68 | #define __BITOPS_XOR "xr" | 66 | #define __BITOPS_XOR "xr" |
@@ -81,8 +79,6 @@ extern const char _sb_findmap[]; | |||
81 | 79 | ||
82 | #else /* CONFIG_64BIT */ | 80 | #else /* CONFIG_64BIT */ |
83 | 81 | ||
84 | #define __BITOPS_ALIGN 7 | ||
85 | #define __BITOPS_WORDSIZE 64 | ||
86 | #define __BITOPS_OR "ogr" | 82 | #define __BITOPS_OR "ogr" |
87 | #define __BITOPS_AND "ngr" | 83 | #define __BITOPS_AND "ngr" |
88 | #define __BITOPS_XOR "xgr" | 84 | #define __BITOPS_XOR "xgr" |
@@ -101,8 +97,7 @@ extern const char _sb_findmap[]; | |||
101 | 97 | ||
102 | #endif /* CONFIG_64BIT */ | 98 | #endif /* CONFIG_64BIT */ |
103 | 99 | ||
104 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) | 100 | #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) |
105 | #define __BITOPS_BARRIER() asm volatile("" : : : "memory") | ||
106 | 101 | ||
107 | #ifdef CONFIG_SMP | 102 | #ifdef CONFIG_SMP |
108 | /* | 103 | /* |
@@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr) | |||
114 | 109 | ||
115 | addr = (unsigned long) ptr; | 110 | addr = (unsigned long) ptr; |
116 | /* calculate address for CS */ | 111 | /* calculate address for CS */ |
117 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 112 | addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; |
118 | /* make OR mask */ | 113 | /* make OR mask */ |
119 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 114 | mask = 1UL << (nr & (BITS_PER_LONG - 1)); |
120 | /* Do the atomic update. */ | 115 | /* Do the atomic update. */ |
121 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); | 116 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); |
122 | } | 117 | } |
@@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) | |||
130 | 125 | ||
131 | addr = (unsigned long) ptr; | 126 | addr = (unsigned long) ptr; |
132 | /* calculate address for CS */ | 127 | /* calculate address for CS */ |
133 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 128 | addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; |
134 | /* make AND mask */ | 129 | /* make AND mask */ |
135 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); | 130 | mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); |
136 | /* Do the atomic update. */ | 131 | /* Do the atomic update. */ |
137 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); | 132 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); |
138 | } | 133 | } |
@@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr) | |||
146 | 141 | ||
147 | addr = (unsigned long) ptr; | 142 | addr = (unsigned long) ptr; |
148 | /* calculate address for CS */ | 143 | /* calculate address for CS */ |
149 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 144 | addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; |
150 | /* make XOR mask */ | 145 | /* make XOR mask */ |
151 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 146 | mask = 1UL << (nr & (BITS_PER_LONG - 1)); |
152 | /* Do the atomic update. */ | 147 | /* Do the atomic update. */ |
153 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); | 148 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); |
154 | } | 149 | } |
@@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr) | |||
163 | 158 | ||
164 | addr = (unsigned long) ptr; | 159 | addr = (unsigned long) ptr; |
165 | /* calculate address for CS */ | 160 | /* calculate address for CS */ |
166 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 161 | addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; |
167 | /* make OR/test mask */ | 162 | /* make OR/test mask */ |
168 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 163 | mask = 1UL << (nr & (BITS_PER_LONG - 1)); |
169 | /* Do the atomic update. */ | 164 | /* Do the atomic update. */ |
170 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); | 165 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); |
171 | __BITOPS_BARRIER(); | 166 | barrier(); |
172 | return (old & mask) != 0; | 167 | return (old & mask) != 0; |
173 | } | 168 | } |
174 | 169 | ||
@@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) | |||
182 | 177 | ||
183 | addr = (unsigned long) ptr; | 178 | addr = (unsigned long) ptr; |
184 | /* calculate address for CS */ | 179 | /* calculate address for CS */ |
185 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 180 | addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; |
186 | /* make AND/test mask */ | 181 | /* make AND/test mask */ |
187 | mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); | 182 | mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); |
188 | /* Do the atomic update. */ | 183 | /* Do the atomic update. */ |
189 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); | 184 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); |
190 | __BITOPS_BARRIER(); | 185 | barrier(); |
191 | return (old ^ new) != 0; | 186 | return (old ^ new) != 0; |
192 | } | 187 | } |
193 | 188 | ||
@@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr) | |||
201 | 196 | ||
202 | addr = (unsigned long) ptr; | 197 | addr = (unsigned long) ptr; |
203 | /* calculate address for CS */ | 198 | /* calculate address for CS */ |
204 | addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; | 199 | addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; |
205 | /* make XOR/test mask */ | 200 | /* make XOR/test mask */ |
206 | mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); | 201 | mask = 1UL << (nr & (BITS_PER_LONG - 1)); |
207 | /* Do the atomic update. */ | 202 | /* Do the atomic update. */ |
208 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); | 203 | __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); |
209 | __BITOPS_BARRIER(); | 204 | barrier(); |
210 | return (old & mask) != 0; | 205 | return (old & mask) != 0; |
211 | } | 206 | } |
212 | #endif /* CONFIG_SMP */ | 207 | #endif /* CONFIG_SMP */ |
@@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) | |||
218 | { | 213 | { |
219 | unsigned long addr; | 214 | unsigned long addr; |
220 | 215 | ||
221 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 216 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
222 | asm volatile( | 217 | asm volatile( |
223 | " oc %O0(1,%R0),%1" | 218 | " oc %O0(1,%R0),%1" |
224 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); | 219 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); |
@@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr) | |||
229 | { | 224 | { |
230 | unsigned long addr; | 225 | unsigned long addr; |
231 | 226 | ||
232 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 227 | addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
233 | *(unsigned char *) addr |= 1 << (nr & 7); | 228 | *(unsigned char *) addr |= 1 << (nr & 7); |
234 | } | 229 | } |
235 | 230 | ||
@@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr) | |||
246 | { | 241 | { |
247 | unsigned long addr; | 242 | unsigned long addr; |
248 | 243 | ||
249 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 244 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
250 | asm volatile( | 245 | asm volatile( |
251 | " nc %O0(1,%R0),%1" | 246 | " nc %O0(1,%R0),%1" |
252 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); | 247 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); |
@@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr) | |||
257 | { | 252 | { |
258 | unsigned long addr; | 253 | unsigned long addr; |
259 | 254 | ||
260 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 255 | addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
261 | *(unsigned char *) addr &= ~(1 << (nr & 7)); | 256 | *(unsigned char *) addr &= ~(1 << (nr & 7)); |
262 | } | 257 | } |
263 | 258 | ||
@@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) | |||
273 | { | 268 | { |
274 | unsigned long addr; | 269 | unsigned long addr; |
275 | 270 | ||
276 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 271 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
277 | asm volatile( | 272 | asm volatile( |
278 | " xc %O0(1,%R0),%1" | 273 | " xc %O0(1,%R0),%1" |
279 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); | 274 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); |
@@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) | |||
284 | { | 279 | { |
285 | unsigned long addr; | 280 | unsigned long addr; |
286 | 281 | ||
287 | addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 282 | addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
288 | *(unsigned char *) addr ^= 1 << (nr & 7); | 283 | *(unsigned char *) addr ^= 1 << (nr & 7); |
289 | } | 284 | } |
290 | 285 | ||
@@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
302 | unsigned long addr; | 297 | unsigned long addr; |
303 | unsigned char ch; | 298 | unsigned char ch; |
304 | 299 | ||
305 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 300 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
306 | ch = *(unsigned char *) addr; | 301 | ch = *(unsigned char *) addr; |
307 | asm volatile( | 302 | asm volatile( |
308 | " oc %O0(1,%R0),%1" | 303 | " oc %O0(1,%R0),%1" |
@@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
321 | unsigned long addr; | 316 | unsigned long addr; |
322 | unsigned char ch; | 317 | unsigned char ch; |
323 | 318 | ||
324 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 319 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
325 | ch = *(unsigned char *) addr; | 320 | ch = *(unsigned char *) addr; |
326 | asm volatile( | 321 | asm volatile( |
327 | " nc %O0(1,%R0),%1" | 322 | " nc %O0(1,%R0),%1" |
@@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
340 | unsigned long addr; | 335 | unsigned long addr; |
341 | unsigned char ch; | 336 | unsigned char ch; |
342 | 337 | ||
343 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 338 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
344 | ch = *(unsigned char *) addr; | 339 | ch = *(unsigned char *) addr; |
345 | asm volatile( | 340 | asm volatile( |
346 | " xc %O0(1,%R0),%1" | 341 | " xc %O0(1,%R0),%1" |
@@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr | |||
376 | unsigned long addr; | 371 | unsigned long addr; |
377 | unsigned char ch; | 372 | unsigned char ch; |
378 | 373 | ||
379 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 374 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
380 | ch = *(volatile unsigned char *) addr; | 375 | ch = *(volatile unsigned char *) addr; |
381 | return (ch >> (nr & 7)) & 1; | 376 | return (ch >> (nr & 7)) & 1; |
382 | } | 377 | } |
@@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr | |||
384 | static inline int | 379 | static inline int |
385 | __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { | 380 | __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { |
386 | return (((volatile char *) addr) | 381 | return (((volatile char *) addr) |
387 | [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; | 382 | [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0; |
388 | } | 383 | } |
389 | 384 | ||
390 | #define test_bit(nr,addr) \ | 385 | #define test_bit(nr,addr) \ |
@@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr, | |||
693 | 688 | ||
694 | if (offset >= size) | 689 | if (offset >= size) |
695 | return size; | 690 | return size; |
696 | bit = offset & (__BITOPS_WORDSIZE - 1); | 691 | bit = offset & (BITS_PER_LONG - 1); |
697 | offset -= bit; | 692 | offset -= bit; |
698 | size -= offset; | 693 | size -= offset; |
699 | p = addr + offset / __BITOPS_WORDSIZE; | 694 | p = addr + offset / BITS_PER_LONG; |
700 | if (bit) { | 695 | if (bit) { |
701 | set = __flo_word(0, *p & (~0UL << bit)); | 696 | set = __flo_word(0, *p & (~0UL << bit)); |
702 | if (set >= size) | 697 | if (set >= size) |
703 | return size + offset; | 698 | return size + offset; |
704 | if (set < __BITOPS_WORDSIZE) | 699 | if (set < BITS_PER_LONG) |
705 | return set + offset; | 700 | return set + offset; |
706 | offset += __BITOPS_WORDSIZE; | 701 | offset += BITS_PER_LONG; |
707 | size -= __BITOPS_WORDSIZE; | 702 | size -= BITS_PER_LONG; |
708 | p++; | 703 | p++; |
709 | } | 704 | } |
710 | return offset + find_first_bit_left(p, size); | 705 | return offset + find_first_bit_left(p, size); |
@@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr, | |||
736 | 731 | ||
737 | if (offset >= size) | 732 | if (offset >= size) |
738 | return size; | 733 | return size; |
739 | bit = offset & (__BITOPS_WORDSIZE - 1); | 734 | bit = offset & (BITS_PER_LONG - 1); |
740 | offset -= bit; | 735 | offset -= bit; |
741 | size -= offset; | 736 | size -= offset; |
742 | p = addr + offset / __BITOPS_WORDSIZE; | 737 | p = addr + offset / BITS_PER_LONG; |
743 | if (bit) { | 738 | if (bit) { |
744 | /* | 739 | /* |
745 | * __ffz_word returns __BITOPS_WORDSIZE | 740 | * __ffz_word returns BITS_PER_LONG |
746 | * if no zero bit is present in the word. | 741 | * if no zero bit is present in the word. |
747 | */ | 742 | */ |
748 | set = __ffz_word(bit, *p >> bit); | 743 | set = __ffz_word(bit, *p >> bit); |
749 | if (set >= size) | 744 | if (set >= size) |
750 | return size + offset; | 745 | return size + offset; |
751 | if (set < __BITOPS_WORDSIZE) | 746 | if (set < BITS_PER_LONG) |
752 | return set + offset; | 747 | return set + offset; |
753 | offset += __BITOPS_WORDSIZE; | 748 | offset += BITS_PER_LONG; |
754 | size -= __BITOPS_WORDSIZE; | 749 | size -= BITS_PER_LONG; |
755 | p++; | 750 | p++; |
756 | } | 751 | } |
757 | return offset + find_first_zero_bit(p, size); | 752 | return offset + find_first_zero_bit(p, size); |
@@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr, | |||
773 | 768 | ||
774 | if (offset >= size) | 769 | if (offset >= size) |
775 | return size; | 770 | return size; |
776 | bit = offset & (__BITOPS_WORDSIZE - 1); | 771 | bit = offset & (BITS_PER_LONG - 1); |
777 | offset -= bit; | 772 | offset -= bit; |
778 | size -= offset; | 773 | size -= offset; |
779 | p = addr + offset / __BITOPS_WORDSIZE; | 774 | p = addr + offset / BITS_PER_LONG; |
780 | if (bit) { | 775 | if (bit) { |
781 | /* | 776 | /* |
782 | * __ffs_word returns __BITOPS_WORDSIZE | 777 | * __ffs_word returns BITS_PER_LONG |
783 | * if no one bit is present in the word. | 778 | * if no one bit is present in the word. |
784 | */ | 779 | */ |
785 | set = __ffs_word(0, *p & (~0UL << bit)); | 780 | set = __ffs_word(0, *p & (~0UL << bit)); |
786 | if (set >= size) | 781 | if (set >= size) |
787 | return size + offset; | 782 | return size + offset; |
788 | if (set < __BITOPS_WORDSIZE) | 783 | if (set < BITS_PER_LONG) |
789 | return set + offset; | 784 | return set + offset; |
790 | offset += __BITOPS_WORDSIZE; | 785 | offset += BITS_PER_LONG; |
791 | size -= __BITOPS_WORDSIZE; | 786 | size -= BITS_PER_LONG; |
792 | p++; | 787 | p++; |
793 | } | 788 | } |
794 | return offset + find_first_bit(p, size); | 789 | return offset + find_first_bit(p, size); |
@@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, | |||
843 | 838 | ||
844 | if (offset >= size) | 839 | if (offset >= size) |
845 | return size; | 840 | return size; |
846 | bit = offset & (__BITOPS_WORDSIZE - 1); | 841 | bit = offset & (BITS_PER_LONG - 1); |
847 | offset -= bit; | 842 | offset -= bit; |
848 | size -= offset; | 843 | size -= offset; |
849 | p = addr + offset / __BITOPS_WORDSIZE; | 844 | p = addr + offset / BITS_PER_LONG; |
850 | if (bit) { | 845 | if (bit) { |
851 | /* | 846 | /* |
852 | * s390 version of ffz returns __BITOPS_WORDSIZE | 847 | * s390 version of ffz returns BITS_PER_LONG |
853 | * if no zero bit is present in the word. | 848 | * if no zero bit is present in the word. |
854 | */ | 849 | */ |
855 | set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); | 850 | set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); |
856 | if (set >= size) | 851 | if (set >= size) |
857 | return size + offset; | 852 | return size + offset; |
858 | if (set < __BITOPS_WORDSIZE) | 853 | if (set < BITS_PER_LONG) |
859 | return set + offset; | 854 | return set + offset; |
860 | offset += __BITOPS_WORDSIZE; | 855 | offset += BITS_PER_LONG; |
861 | size -= __BITOPS_WORDSIZE; | 856 | size -= BITS_PER_LONG; |
862 | p++; | 857 | p++; |
863 | } | 858 | } |
864 | return offset + find_first_zero_bit_le(p, size); | 859 | return offset + find_first_zero_bit_le(p, size); |
@@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size, | |||
885 | 880 | ||
886 | if (offset >= size) | 881 | if (offset >= size) |
887 | return size; | 882 | return size; |
888 | bit = offset & (__BITOPS_WORDSIZE - 1); | 883 | bit = offset & (BITS_PER_LONG - 1); |
889 | offset -= bit; | 884 | offset -= bit; |
890 | size -= offset; | 885 | size -= offset; |
891 | p = addr + offset / __BITOPS_WORDSIZE; | 886 | p = addr + offset / BITS_PER_LONG; |
892 | if (bit) { | 887 | if (bit) { |
893 | /* | 888 | /* |
894 | * s390 version of ffz returns __BITOPS_WORDSIZE | 889 | * s390 version of ffz returns BITS_PER_LONG |
895 | * if no zero bit is present in the word. | 890 | * if no zero bit is present in the word. |
896 | */ | 891 | */ |
897 | set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); | 892 | set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); |
898 | if (set >= size) | 893 | if (set >= size) |
899 | return size + offset; | 894 | return size + offset; |
900 | if (set < __BITOPS_WORDSIZE) | 895 | if (set < BITS_PER_LONG) |
901 | return set + offset; | 896 | return set + offset; |
902 | offset += __BITOPS_WORDSIZE; | 897 | offset += BITS_PER_LONG; |
903 | size -= __BITOPS_WORDSIZE; | 898 | size -= BITS_PER_LONG; |
904 | p++; | 899 | p++; |
905 | } | 900 | } |
906 | return offset + find_first_bit_le(p, size); | 901 | return offset + find_first_bit_le(p, size); |
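All of the bitops.h changes are mechanical: the private __BITOPS_WORDSIZE (32 or 64) and __BITOPS_BARRIER() macros are replaced by the generic BITS_PER_LONG and barrier(), and the now-unused __BITOPS_ALIGN goes with them. A minimal sanity-check sketch (not part of the patch) of why the substitution cannot change any mask or address calculation in this file, assuming the usual definitions from <asm/bitsperlong.h> and <linux/compiler.h>:

	#include <linux/bug.h>
	#include <asm/bitsperlong.h>

	/* BITS_PER_LONG is 32 on 31-bit and 64 on 64-bit kernels, exactly the
	 * value __BITOPS_WORDSIZE used to encode, and barrier() is the same
	 * empty asm with a "memory" clobber as the removed __BITOPS_BARRIER(). */
	static inline void check_bitops_wordsize(void)
	{
		BUILD_BUG_ON(BITS_PER_LONG != 8 * sizeof(unsigned long));
	}
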
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e6061617a50b..f201af8be580 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *); | |||
220 | #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) | 220 | #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) |
221 | 221 | ||
222 | extern struct ccw_device *ccw_device_probe_console(void); | 222 | extern struct ccw_device *ccw_device_probe_console(void); |
223 | extern int ccw_device_force_console(void); | 223 | extern void ccw_device_wait_idle(struct ccw_device *); |
224 | extern int ccw_device_force_console(struct ccw_device *); | ||
224 | 225 | ||
225 | int ccw_device_siosl(struct ccw_device *); | 226 | int ccw_device_siosl(struct ccw_device *); |
226 | 227 | ||
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ad2b924167d7..ffb898961c8d 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1, | |||
296 | return 0; | 296 | return 0; |
297 | } | 297 | } |
298 | 298 | ||
299 | extern void wait_cons_dev(void); | ||
300 | |||
301 | extern void css_schedule_reprobe(void); | 299 | extern void css_schedule_reprobe(void); |
302 | 300 | ||
303 | extern void reipl_ccw_dev(struct ccw_dev_id *id); | 301 | extern void reipl_ccw_dev(struct ccw_dev_id *id); |
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index f8c6df6cd1f0..c1e7c646727c 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -70,6 +70,22 @@ typedef u32 compat_ulong_t; | |||
70 | typedef u64 compat_u64; | 70 | typedef u64 compat_u64; |
71 | typedef u32 compat_uptr_t; | 71 | typedef u32 compat_uptr_t; |
72 | 72 | ||
73 | typedef struct { | ||
74 | u32 mask; | ||
75 | u32 addr; | ||
76 | } __aligned(8) psw_compat_t; | ||
77 | |||
78 | typedef struct { | ||
79 | psw_compat_t psw; | ||
80 | u32 gprs[NUM_GPRS]; | ||
81 | u32 acrs[NUM_ACRS]; | ||
82 | u32 orig_gpr2; | ||
83 | } s390_compat_regs; | ||
84 | |||
85 | typedef struct { | ||
86 | u32 gprs_high[NUM_GPRS]; | ||
87 | } s390_compat_regs_high; | ||
88 | |||
73 | struct compat_timespec { | 89 | struct compat_timespec { |
74 | compat_time_t tv_sec; | 90 | compat_time_t tv_sec; |
75 | s32 tv_nsec; | 91 | s32 tv_nsec; |
@@ -124,18 +140,33 @@ struct compat_flock64 { | |||
124 | }; | 140 | }; |
125 | 141 | ||
126 | struct compat_statfs { | 142 | struct compat_statfs { |
127 | s32 f_type; | 143 | u32 f_type; |
128 | s32 f_bsize; | 144 | u32 f_bsize; |
129 | s32 f_blocks; | 145 | u32 f_blocks; |
130 | s32 f_bfree; | 146 | u32 f_bfree; |
131 | s32 f_bavail; | 147 | u32 f_bavail; |
132 | s32 f_files; | 148 | u32 f_files; |
133 | s32 f_ffree; | 149 | u32 f_ffree; |
150 | compat_fsid_t f_fsid; | ||
151 | u32 f_namelen; | ||
152 | u32 f_frsize; | ||
153 | u32 f_flags; | ||
154 | u32 f_spare[4]; | ||
155 | }; | ||
156 | |||
157 | struct compat_statfs64 { | ||
158 | u32 f_type; | ||
159 | u32 f_bsize; | ||
160 | u64 f_blocks; | ||
161 | u64 f_bfree; | ||
162 | u64 f_bavail; | ||
163 | u64 f_files; | ||
164 | u64 f_ffree; | ||
134 | compat_fsid_t f_fsid; | 165 | compat_fsid_t f_fsid; |
135 | s32 f_namelen; | 166 | u32 f_namelen; |
136 | s32 f_frsize; | 167 | u32 f_frsize; |
137 | s32 f_flags; | 168 | u32 f_flags; |
138 | s32 f_spare[5]; | 169 | u32 f_spare[4]; |
139 | }; | 170 | }; |
140 | 171 | ||
141 | #define COMPAT_RLIM_OLD_INFINITY 0x7fffffff | 172 | #define COMPAT_RLIM_OLD_INFINITY 0x7fffffff |
@@ -248,8 +279,6 @@ static inline int is_compat_task(void) | |||
248 | return is_32bit_task(); | 279 | return is_32bit_task(); |
249 | } | 280 | } |
250 | 281 | ||
251 | #endif | ||
252 | |||
253 | static inline void __user *arch_compat_alloc_user_space(long len) | 282 | static inline void __user *arch_compat_alloc_user_space(long len) |
254 | { | 283 | { |
255 | unsigned long stack; | 284 | unsigned long stack; |
@@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len) | |||
260 | return (void __user *) (stack - len); | 289 | return (void __user *) (stack - len); |
261 | } | 290 | } |
262 | 291 | ||
292 | #endif | ||
293 | |||
263 | struct compat_ipc64_perm { | 294 | struct compat_ipc64_perm { |
264 | compat_key_t key; | 295 | compat_key_t key; |
265 | __compat_uid32_t uid; | 296 | __compat_uid32_t uid; |
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1bfdf24b85a2..78f4f8711d58 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -119,6 +119,8 @@ | |||
119 | */ | 119 | */ |
120 | 120 | ||
121 | #include <asm/ptrace.h> | 121 | #include <asm/ptrace.h> |
122 | #include <asm/compat.h> | ||
123 | #include <asm/syscall.h> | ||
122 | #include <asm/user.h> | 124 | #include <asm/user.h> |
123 | 125 | ||
124 | typedef s390_fp_regs elf_fpregset_t; | 126 | typedef s390_fp_regs elf_fpregset_t; |
@@ -180,18 +182,31 @@ extern unsigned long elf_hwcap; | |||
180 | extern char elf_platform[]; | 182 | extern char elf_platform[]; |
181 | #define ELF_PLATFORM (elf_platform) | 183 | #define ELF_PLATFORM (elf_platform) |
182 | 184 | ||
183 | #ifdef CONFIG_64BIT | 185 | #ifndef CONFIG_COMPAT |
186 | #define SET_PERSONALITY(ex) \ | ||
187 | do { \ | ||
188 | set_personality(PER_LINUX | \ | ||
189 | (current->personality & (~PER_MASK))); \ | ||
190 | current_thread_info()->sys_call_table = \ | ||
191 | (unsigned long) &sys_call_table; \ | ||
192 | } while (0) | ||
193 | #else /* CONFIG_COMPAT */ | ||
184 | #define SET_PERSONALITY(ex) \ | 194 | #define SET_PERSONALITY(ex) \ |
185 | do { \ | 195 | do { \ |
186 | if (personality(current->personality) != PER_LINUX32) \ | 196 | if (personality(current->personality) != PER_LINUX32) \ |
187 | set_personality(PER_LINUX | \ | 197 | set_personality(PER_LINUX | \ |
188 | (current->personality & ~PER_MASK)); \ | 198 | (current->personality & ~PER_MASK)); \ |
189 | if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ | 199 | if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ |
190 | set_thread_flag(TIF_31BIT); \ | 200 | set_thread_flag(TIF_31BIT); \ |
191 | else \ | 201 | current_thread_info()->sys_call_table = \ |
202 | (unsigned long) &sys_call_table_emu; \ | ||
203 | } else { \ | ||
192 | clear_thread_flag(TIF_31BIT); \ | 204 | clear_thread_flag(TIF_31BIT); \ |
205 | current_thread_info()->sys_call_table = \ | ||
206 | (unsigned long) &sys_call_table; \ | ||
207 | } \ | ||
193 | } while (0) | 208 | } while (0) |
194 | #endif /* CONFIG_64BIT */ | 209 | #endif /* CONFIG_COMPAT */ |
195 | 210 | ||
196 | #define STACK_RND_MASK 0x7ffUL | 211 | #define STACK_RND_MASK 0x7ffUL |
197 | 212 | ||
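SET_PERSONALITY() now also caches the address of the system call table to use in the task's thread_info (see the new sys_call_table field in asm/thread_info.h and the sys_call_table_emu declaration in asm/syscall.h later in this diff), so a compat task gets the 31-bit emulation table selected once at exec time instead of the entry code testing TIF_31BIT on every system call. A hedged sketch of the idea in C; the real dispatch is done in the assembler entry code, which is not part of this excerpt:

	#include <linux/thread_info.h>

	/* Illustration only: how a consumer could fetch the per-task table. */
	static inline const unsigned int *current_syscall_table(void)
	{
		/* entries are 32-bit addresses, see the comment in asm/syscall.h */
		return (const unsigned int *) current_thread_info()->sys_call_table;
	}
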
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 593753ee07f3..bd90359d6d22 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -114,7 +114,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | |||
114 | #define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \ | 114 | #define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \ |
115 | ({ \ | 115 | ({ \ |
116 | pte_t __pte = huge_ptep_get(__ptep); \ | 116 | pte_t __pte = huge_ptep_get(__ptep); \ |
117 | if (pte_write(__pte)) { \ | 117 | if (huge_pte_write(__pte)) { \ |
118 | huge_ptep_invalidate(__mm, __addr, __ptep); \ | 118 | huge_ptep_invalidate(__mm, __addr, __ptep); \ |
119 | set_huge_pte_at(__mm, __addr, __ptep, \ | 119 | set_huge_pte_at(__mm, __addr, __ptep, \ |
120 | huge_pte_wrprotect(__pte)); \ | 120 | huge_pte_wrprotect(__pte)); \ |
@@ -127,4 +127,58 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | |||
127 | huge_ptep_invalidate(vma->vm_mm, address, ptep); | 127 | huge_ptep_invalidate(vma->vm_mm, address, ptep); |
128 | } | 128 | } |
129 | 129 | ||
130 | static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) | ||
131 | { | ||
132 | pte_t pte; | ||
133 | pmd_t pmd; | ||
134 | |||
135 | pmd = mk_pmd_phys(page_to_phys(page), pgprot); | ||
136 | pte_val(pte) = pmd_val(pmd); | ||
137 | return pte; | ||
138 | } | ||
139 | |||
140 | static inline int huge_pte_write(pte_t pte) | ||
141 | { | ||
142 | pmd_t pmd; | ||
143 | |||
144 | pmd_val(pmd) = pte_val(pte); | ||
145 | return pmd_write(pmd); | ||
146 | } | ||
147 | |||
148 | static inline int huge_pte_dirty(pte_t pte) | ||
149 | { | ||
150 | /* No dirty bit in the segment table entry. */ | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static inline pte_t huge_pte_mkwrite(pte_t pte) | ||
155 | { | ||
156 | pmd_t pmd; | ||
157 | |||
158 | pmd_val(pmd) = pte_val(pte); | ||
159 | pte_val(pte) = pmd_val(pmd_mkwrite(pmd)); | ||
160 | return pte; | ||
161 | } | ||
162 | |||
163 | static inline pte_t huge_pte_mkdirty(pte_t pte) | ||
164 | { | ||
165 | /* No dirty bit in the segment table entry. */ | ||
166 | return pte; | ||
167 | } | ||
168 | |||
169 | static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) | ||
170 | { | ||
171 | pmd_t pmd; | ||
172 | |||
173 | pmd_val(pmd) = pte_val(pte); | ||
174 | pte_val(pte) = pmd_val(pmd_modify(pmd, newprot)); | ||
175 | return pte; | ||
176 | } | ||
177 | |||
178 | static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, | ||
179 | pte_t *ptep) | ||
180 | { | ||
181 | pmd_clear((pmd_t *) ptep); | ||
182 | } | ||
183 | |||
130 | #endif /* _ASM_S390_HUGETLB_H */ | 184 | #endif /* _ASM_S390_HUGETLB_H */ |
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 27cb32185ce1..379d96e2105e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr); | |||
50 | #define ioremap_nocache(addr, size) ioremap(addr, size) | 50 | #define ioremap_nocache(addr, size) ioremap(addr, size) |
51 | #define ioremap_wc ioremap_nocache | 51 | #define ioremap_wc ioremap_nocache |
52 | 52 | ||
53 | /* TODO: s390 cannot support io_remap_pfn_range... */ | ||
54 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
55 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
56 | |||
57 | static inline void __iomem *ioremap(unsigned long offset, unsigned long size) | 53 | static inline void __iomem *ioremap(unsigned long offset, unsigned long size) |
58 | { | 54 | { |
59 | return (void __iomem *) offset; | 55 | return (void __iomem *) offset; |
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 05333b7f0469..6c1801235db9 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev) | |||
140 | struct zpci_dev *zpci_alloc_device(void); | 140 | struct zpci_dev *zpci_alloc_device(void); |
141 | int zpci_create_device(struct zpci_dev *); | 141 | int zpci_create_device(struct zpci_dev *); |
142 | int zpci_enable_device(struct zpci_dev *); | 142 | int zpci_enable_device(struct zpci_dev *); |
143 | int zpci_disable_device(struct zpci_dev *); | ||
143 | void zpci_stop_device(struct zpci_dev *); | 144 | void zpci_stop_device(struct zpci_dev *); |
144 | void zpci_free_device(struct zpci_dev *); | 145 | void zpci_free_device(struct zpci_dev *); |
145 | int zpci_scan_device(struct zpci_dev *); | 146 | int zpci_scan_device(struct zpci_dev *); |
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6bbec4265b6e..1ca5d1047c71 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id; | |||
7 | extern debug_info_t *pci_debug_err_id; | 7 | extern debug_info_t *pci_debug_err_id; |
8 | 8 | ||
9 | #ifdef CONFIG_PCI_DEBUG | 9 | #ifdef CONFIG_PCI_DEBUG |
10 | #define zpci_dbg(fmt, args...) \ | 10 | #define zpci_dbg(imp, fmt, args...) \ |
11 | do { \ | 11 | debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args) |
12 | if (pci_debug_msg_id->level >= 2) \ | ||
13 | debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\ | ||
14 | } while (0) | ||
15 | 12 | ||
16 | #else /* !CONFIG_PCI_DEBUG */ | 13 | #else /* !CONFIG_PCI_DEBUG */ |
17 | #define zpci_dbg(fmt, args...) do { } while (0) | 14 | #define zpci_dbg(imp, fmt, args...) do { } while (0) |
18 | #endif | 15 | #endif |
19 | 16 | ||
20 | #define zpci_err(text...) \ | 17 | #define zpci_err(text...) \ |
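zpci_dbg() now takes the message importance level as its first argument and passes it straight to debug_sprintf_event(), instead of hard-coding level 2 behind a manual level check. A hypothetical call site (the identifiers here are illustrative, not taken from the patch) would change like this:

	zpci_dbg("failed to enable fn handle: 0x%x\n", fh);	/* old: fixed at level 2 */
	zpci_dbg(3, "failed to enable fn handle: 0x%x\n", fh);	/* new: caller picks the level */
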
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 1486a98d5dad..e6a2bdd4d705 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -1,10 +1,6 @@ | |||
1 | #ifndef _ASM_S390_PCI_INSN_H | 1 | #ifndef _ASM_S390_PCI_INSN_H |
2 | #define _ASM_S390_PCI_INSN_H | 2 | #define _ASM_S390_PCI_INSN_H |
3 | 3 | ||
4 | #include <linux/delay.h> | ||
5 | |||
6 | #define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ | ||
7 | |||
8 | /* Load/Store status codes */ | 4 | /* Load/Store status codes */ |
9 | #define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 | 5 | #define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 |
10 | #define ZPCI_PCI_ST_FUNC_IN_ERR 8 | 6 | #define ZPCI_PCI_ST_FUNC_IN_ERR 8 |
@@ -82,199 +78,12 @@ struct zpci_fib { | |||
82 | u64 reserved7; | 78 | u64 reserved7; |
83 | } __packed; | 79 | } __packed; |
84 | 80 | ||
85 | /* Modify PCI Function Controls */ | ||
86 | static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) | ||
87 | { | ||
88 | u8 cc; | ||
89 | |||
90 | asm volatile ( | ||
91 | " .insn rxy,0xe300000000d0,%[req],%[fib]\n" | ||
92 | " ipm %[cc]\n" | ||
93 | " srl %[cc],28\n" | ||
94 | : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib) | ||
95 | : : "cc"); | ||
96 | *status = req >> 24 & 0xff; | ||
97 | return cc; | ||
98 | } | ||
99 | |||
100 | static inline int mpcifc_instr(u64 req, struct zpci_fib *fib) | ||
101 | { | ||
102 | u8 cc, status; | ||
103 | |||
104 | do { | ||
105 | cc = __mpcifc(req, fib, &status); | ||
106 | if (cc == 2) | ||
107 | msleep(ZPCI_INSN_BUSY_DELAY); | ||
108 | } while (cc == 2); | ||
109 | |||
110 | if (cc) | ||
111 | printk_once(KERN_ERR "%s: error cc: %d status: %d\n", | ||
112 | __func__, cc, status); | ||
113 | return (cc) ? -EIO : 0; | ||
114 | } | ||
115 | |||
116 | /* Refresh PCI Translations */ | ||
117 | static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) | ||
118 | { | ||
119 | register u64 __addr asm("2") = addr; | ||
120 | register u64 __range asm("3") = range; | ||
121 | u8 cc; | ||
122 | |||
123 | asm volatile ( | ||
124 | " .insn rre,0xb9d30000,%[fn],%[addr]\n" | ||
125 | " ipm %[cc]\n" | ||
126 | " srl %[cc],28\n" | ||
127 | : [cc] "=d" (cc), [fn] "+d" (fn) | ||
128 | : [addr] "d" (__addr), "d" (__range) | ||
129 | : "cc"); | ||
130 | *status = fn >> 24 & 0xff; | ||
131 | return cc; | ||
132 | } | ||
133 | |||
134 | static inline int rpcit_instr(u64 fn, u64 addr, u64 range) | ||
135 | { | ||
136 | u8 cc, status; | ||
137 | |||
138 | do { | ||
139 | cc = __rpcit(fn, addr, range, &status); | ||
140 | if (cc == 2) | ||
141 | udelay(ZPCI_INSN_BUSY_DELAY); | ||
142 | } while (cc == 2); | ||
143 | |||
144 | if (cc) | ||
145 | printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n", | ||
146 | __func__, cc, status, addr, range); | ||
147 | return (cc) ? -EIO : 0; | ||
148 | } | ||
149 | |||
150 | /* Store PCI function controls */ | ||
151 | static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status) | ||
152 | { | ||
153 | u64 fn = (u64) handle << 32 | space << 16; | ||
154 | u8 cc; | ||
155 | |||
156 | asm volatile ( | ||
157 | " .insn rxy,0xe300000000d4,%[fn],%[fib]\n" | ||
158 | " ipm %[cc]\n" | ||
159 | " srl %[cc],28\n" | ||
160 | : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib) | ||
161 | : : "cc"); | ||
162 | *status = fn >> 24 & 0xff; | ||
163 | return cc; | ||
164 | } | ||
165 | |||
166 | /* Set Interruption Controls */ | ||
167 | static inline void sic_instr(u16 ctl, char *unused, u8 isc) | ||
168 | { | ||
169 | asm volatile ( | ||
170 | " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" | ||
171 | : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); | ||
172 | } | ||
173 | |||
174 | /* PCI Load */ | ||
175 | static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status) | ||
176 | { | ||
177 | register u64 __req asm("2") = req; | ||
178 | register u64 __offset asm("3") = offset; | ||
179 | u64 __data; | ||
180 | u8 cc; | ||
181 | |||
182 | asm volatile ( | ||
183 | " .insn rre,0xb9d20000,%[data],%[req]\n" | ||
184 | " ipm %[cc]\n" | ||
185 | " srl %[cc],28\n" | ||
186 | : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req) | ||
187 | : "d" (__offset) | ||
188 | : "cc"); | ||
189 | *status = __req >> 24 & 0xff; | ||
190 | *data = __data; | ||
191 | return cc; | ||
192 | } | ||
193 | |||
194 | static inline int pcilg_instr(u64 *data, u64 req, u64 offset) | ||
195 | { | ||
196 | u8 cc, status; | ||
197 | |||
198 | do { | ||
199 | cc = __pcilg(data, req, offset, &status); | ||
200 | if (cc == 2) | ||
201 | udelay(ZPCI_INSN_BUSY_DELAY); | ||
202 | } while (cc == 2); | ||
203 | |||
204 | if (cc) { | ||
205 | printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", | ||
206 | __func__, cc, status, req, offset); | ||
207 | /* TODO: on IO errors set data to 0xff... | ||
208 | * here or in users of pcilg (le conversion)? | ||
209 | */ | ||
210 | } | ||
211 | return (cc) ? -EIO : 0; | ||
212 | } | ||
213 | |||
214 | /* PCI Store */ | ||
215 | static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status) | ||
216 | { | ||
217 | register u64 __req asm("2") = req; | ||
218 | register u64 __offset asm("3") = offset; | ||
219 | u8 cc; | ||
220 | |||
221 | asm volatile ( | ||
222 | " .insn rre,0xb9d00000,%[data],%[req]\n" | ||
223 | " ipm %[cc]\n" | ||
224 | " srl %[cc],28\n" | ||
225 | : [cc] "=d" (cc), [req] "+d" (__req) | ||
226 | : "d" (__offset), [data] "d" (data) | ||
227 | : "cc"); | ||
228 | *status = __req >> 24 & 0xff; | ||
229 | return cc; | ||
230 | } | ||
231 | |||
232 | static inline int pcistg_instr(u64 data, u64 req, u64 offset) | ||
233 | { | ||
234 | u8 cc, status; | ||
235 | |||
236 | do { | ||
237 | cc = __pcistg(data, req, offset, &status); | ||
238 | if (cc == 2) | ||
239 | udelay(ZPCI_INSN_BUSY_DELAY); | ||
240 | } while (cc == 2); | ||
241 | |||
242 | if (cc) | ||
243 | printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", | ||
244 | __func__, cc, status, req, offset); | ||
245 | return (cc) ? -EIO : 0; | ||
246 | } | ||
247 | |||
248 | /* PCI Store Block */ | ||
249 | static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) | ||
250 | { | ||
251 | u8 cc; | ||
252 | |||
253 | asm volatile ( | ||
254 | " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" | ||
255 | " ipm %[cc]\n" | ||
256 | " srl %[cc],28\n" | ||
257 | : [cc] "=d" (cc), [req] "+d" (req) | ||
258 | : [offset] "d" (offset), [data] "Q" (*data) | ||
259 | : "cc"); | ||
260 | *status = req >> 24 & 0xff; | ||
261 | return cc; | ||
262 | } | ||
263 | |||
264 | static inline int pcistb_instr(const u64 *data, u64 req, u64 offset) | ||
265 | { | ||
266 | u8 cc, status; | ||
267 | |||
268 | do { | ||
269 | cc = __pcistb(data, req, offset, &status); | ||
270 | if (cc == 2) | ||
271 | udelay(ZPCI_INSN_BUSY_DELAY); | ||
272 | } while (cc == 2); | ||
273 | 81 | ||
274 | if (cc) | 82 | int s390pci_mod_fc(u64 req, struct zpci_fib *fib); |
275 | printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", | 83 | int s390pci_refresh_trans(u64 fn, u64 addr, u64 range); |
276 | __func__, cc, status, req, offset); | 84 | int s390pci_load(u64 *data, u64 req, u64 offset); |
277 | return (cc) ? -EIO : 0; | 85 | int s390pci_store(u64 data, u64 req, u64 offset); |
278 | } | 86 | int s390pci_store_block(const u64 *data, u64 req, u64 offset); |
87 | void set_irq_ctrl(u16 ctl, char *unused, u8 isc); | ||
279 | 88 | ||
280 | #endif | 89 | #endif |
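The inline PCI instruction wrappers and their busy-retry loops (mpcifc_instr, rpcit_instr, pcilg_instr, pcistg_instr, pcistb_instr, sic_instr) disappear from the header; only declarations of out-of-line replacements remain (s390pci_mod_fc, s390pci_refresh_trans, s390pci_load, s390pci_store, s390pci_store_block, set_irq_ctrl). The implementation file is not part of this diff; presumably it keeps the same retry pattern the header used to carry, roughly:

	/* Sketch only, based on the removed pcilg_instr() above; the real
	 * out-of-line body lives in a C file that is not shown here. */
	int s390pci_load(u64 *data, u64 req, u64 offset)
	{
		u8 status, cc;

		do {
			cc = __pcilg(data, req, offset, &status);
			if (cc == 2)
				udelay(ZPCI_INSN_BUSY_DELAY);	/* CC 2 means busy, retry */
		} while (cc == 2);

		if (cc)
			printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
				    __func__, cc, status);
		return cc ? -EIO : 0;
	}
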
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 5fd81f31d6c7..83a9caa6ae53 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \ | |||
36 | u64 data; \ | 36 | u64 data; \ |
37 | int rc; \ | 37 | int rc; \ |
38 | \ | 38 | \ |
39 | rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \ | 39 | rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \ |
40 | if (rc) \ | 40 | if (rc) \ |
41 | data = -1ULL; \ | 41 | data = -1ULL; \ |
42 | return (RETTYPE) data; \ | 42 | return (RETTYPE) data; \ |
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \ | |||
50 | u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ | 50 | u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ |
51 | u64 data = (VALTYPE) val; \ | 51 | u64 data = (VALTYPE) val; \ |
52 | \ | 52 | \ |
53 | pcistg_instr(data, req, ZPCI_OFFSET(addr)); \ | 53 | s390pci_store(data, req, ZPCI_OFFSET(addr)); \ |
54 | } | 54 | } |
55 | 55 | ||
56 | zpci_read(8, u64) | 56 | zpci_read(8, u64) |
@@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len | |||
83 | val = 0; /* let FW report error */ | 83 | val = 0; /* let FW report error */ |
84 | break; | 84 | break; |
85 | } | 85 | } |
86 | return pcistg_instr(val, req, offset); | 86 | return s390pci_store(val, req, offset); |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) | 89 | static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) |
90 | { | 90 | { |
91 | u64 data; | 91 | u64 data; |
92 | u8 cc; | 92 | int cc; |
93 | |||
94 | cc = s390pci_load(&data, req, offset); | ||
95 | if (cc) | ||
96 | goto out; | ||
93 | 97 | ||
94 | cc = pcilg_instr(&data, req, offset); | ||
95 | switch (len) { | 98 | switch (len) { |
96 | case 1: | 99 | case 1: |
97 | *((u8 *) dst) = (u8) data; | 100 | *((u8 *) dst) = (u8) data; |
@@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) | |||
106 | *((u64 *) dst) = (u64) data; | 109 | *((u64 *) dst) = (u64) data; |
107 | break; | 110 | break; |
108 | } | 111 | } |
112 | out: | ||
109 | return cc; | 113 | return cc; |
110 | } | 114 | } |
111 | 115 | ||
112 | static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) | 116 | static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) |
113 | { | 117 | { |
114 | return pcistb_instr(data, req, offset); | 118 | return s390pci_store_block(data, req, offset); |
115 | } | 119 | } |
116 | 120 | ||
117 | static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) | 121 | static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4a5443118cfb..b4622915bd15 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask; | |||
57 | (((unsigned long)(vaddr)) &zero_page_mask)))) | 57 | (((unsigned long)(vaddr)) &zero_page_mask)))) |
58 | #define __HAVE_COLOR_ZERO_PAGE | 58 | #define __HAVE_COLOR_ZERO_PAGE |
59 | 59 | ||
60 | /* TODO: s390 cannot support io_remap_pfn_range... */ | ||
61 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
62 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
63 | |||
60 | #endif /* !__ASSEMBLY__ */ | 64 | #endif /* !__ASSEMBLY__ */ |
61 | 65 | ||
62 | /* | 66 | /* |
@@ -420,6 +424,13 @@ extern unsigned long MODULES_END; | |||
420 | #define __S110 PAGE_RW | 424 | #define __S110 PAGE_RW |
421 | #define __S111 PAGE_RW | 425 | #define __S111 PAGE_RW |
422 | 426 | ||
427 | /* | ||
428 | * Segment entry (large page) protection definitions. | ||
429 | */ | ||
430 | #define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) | ||
431 | #define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) | ||
432 | #define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) | ||
433 | |||
423 | static inline int mm_exclusive(struct mm_struct *mm) | 434 | static inline int mm_exclusive(struct mm_struct *mm) |
424 | { | 435 | { |
425 | return likely(mm == current->active_mm && | 436 | return likely(mm == current->active_mm && |
@@ -760,6 +771,8 @@ void gmap_disable(struct gmap *gmap); | |||
760 | int gmap_map_segment(struct gmap *gmap, unsigned long from, | 771 | int gmap_map_segment(struct gmap *gmap, unsigned long from, |
761 | unsigned long to, unsigned long length); | 772 | unsigned long to, unsigned long length); |
762 | int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); | 773 | int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); |
774 | unsigned long __gmap_translate(unsigned long address, struct gmap *); | ||
775 | unsigned long gmap_translate(unsigned long address, struct gmap *); | ||
763 | unsigned long __gmap_fault(unsigned long address, struct gmap *); | 776 | unsigned long __gmap_fault(unsigned long address, struct gmap *); |
764 | unsigned long gmap_fault(unsigned long address, struct gmap *); | 777 | unsigned long gmap_fault(unsigned long address, struct gmap *); |
765 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *); | 778 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *); |
@@ -908,26 +921,6 @@ static inline pte_t pte_mkspecial(pte_t pte) | |||
908 | #ifdef CONFIG_HUGETLB_PAGE | 921 | #ifdef CONFIG_HUGETLB_PAGE |
909 | static inline pte_t pte_mkhuge(pte_t pte) | 922 | static inline pte_t pte_mkhuge(pte_t pte) |
910 | { | 923 | { |
911 | /* | ||
912 | * PROT_NONE needs to be remapped from the pte type to the ste type. | ||
913 | * The HW invalid bit is also different for pte and ste. The pte | ||
914 | * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE | ||
915 | * bit, so we don't have to clear it. | ||
916 | */ | ||
917 | if (pte_val(pte) & _PAGE_INVALID) { | ||
918 | if (pte_val(pte) & _PAGE_SWT) | ||
919 | pte_val(pte) |= _HPAGE_TYPE_NONE; | ||
920 | pte_val(pte) |= _SEGMENT_ENTRY_INV; | ||
921 | } | ||
922 | /* | ||
923 | * Clear SW pte bits, there are no SW bits in a segment table entry. | ||
924 | */ | ||
925 | pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC | | ||
926 | _PAGE_SWR | _PAGE_SWW); | ||
927 | /* | ||
928 | * Also set the change-override bit because we don't need dirty bit | ||
929 | * tracking for hugetlbfs pages. | ||
930 | */ | ||
931 | pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); | 924 | pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); |
932 | return pte; | 925 | return pte; |
933 | } | 926 | } |
@@ -1272,31 +1265,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) | |||
1272 | } | 1265 | } |
1273 | } | 1266 | } |
1274 | 1267 | ||
1275 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 1268 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) |
1276 | |||
1277 | #define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) | ||
1278 | #define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) | ||
1279 | #define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) | ||
1280 | |||
1281 | #define __HAVE_ARCH_PGTABLE_DEPOSIT | ||
1282 | extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); | ||
1283 | |||
1284 | #define __HAVE_ARCH_PGTABLE_WITHDRAW | ||
1285 | extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); | ||
1286 | |||
1287 | static inline int pmd_trans_splitting(pmd_t pmd) | ||
1288 | { | ||
1289 | return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; | ||
1290 | } | ||
1291 | |||
1292 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, | ||
1293 | pmd_t *pmdp, pmd_t entry) | ||
1294 | { | ||
1295 | if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) | ||
1296 | pmd_val(entry) |= _SEGMENT_ENTRY_CO; | ||
1297 | *pmdp = entry; | ||
1298 | } | ||
1299 | |||
1300 | static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) | 1269 | static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) |
1301 | { | 1270 | { |
1302 | /* | 1271 | /* |
@@ -1317,10 +1286,11 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | |||
1317 | return pmd; | 1286 | return pmd; |
1318 | } | 1287 | } |
1319 | 1288 | ||
1320 | static inline pmd_t pmd_mkhuge(pmd_t pmd) | 1289 | static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) |
1321 | { | 1290 | { |
1322 | pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; | 1291 | pmd_t __pmd; |
1323 | return pmd; | 1292 | pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); |
1293 | return __pmd; | ||
1324 | } | 1294 | } |
1325 | 1295 | ||
1326 | static inline pmd_t pmd_mkwrite(pmd_t pmd) | 1296 | static inline pmd_t pmd_mkwrite(pmd_t pmd) |
@@ -1330,6 +1300,34 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) | |||
1330 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; | 1300 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; |
1331 | return pmd; | 1301 | return pmd; |
1332 | } | 1302 | } |
1303 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ | ||
1304 | |||
1305 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
1306 | |||
1307 | #define __HAVE_ARCH_PGTABLE_DEPOSIT | ||
1308 | extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); | ||
1309 | |||
1310 | #define __HAVE_ARCH_PGTABLE_WITHDRAW | ||
1311 | extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); | ||
1312 | |||
1313 | static inline int pmd_trans_splitting(pmd_t pmd) | ||
1314 | { | ||
1315 | return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; | ||
1316 | } | ||
1317 | |||
1318 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, | ||
1319 | pmd_t *pmdp, pmd_t entry) | ||
1320 | { | ||
1321 | if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) | ||
1322 | pmd_val(entry) |= _SEGMENT_ENTRY_CO; | ||
1323 | *pmdp = entry; | ||
1324 | } | ||
1325 | |||
1326 | static inline pmd_t pmd_mkhuge(pmd_t pmd) | ||
1327 | { | ||
1328 | pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; | ||
1329 | return pmd; | ||
1330 | } | ||
1333 | 1331 | ||
1334 | static inline pmd_t pmd_wrprotect(pmd_t pmd) | 1332 | static inline pmd_t pmd_wrprotect(pmd_t pmd) |
1335 | { | 1333 | { |
@@ -1426,13 +1424,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, | |||
1426 | } | 1424 | } |
1427 | } | 1425 | } |
1428 | 1426 | ||
1429 | static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) | ||
1430 | { | ||
1431 | pmd_t __pmd; | ||
1432 | pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); | ||
1433 | return __pmd; | ||
1434 | } | ||
1435 | |||
1436 | #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) | 1427 | #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) |
1437 | #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) | 1428 | #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) |
1438 | 1429 | ||
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 94e749c90230..6b499870662f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t); | |||
161 | 161 | ||
162 | extern void show_code(struct pt_regs *regs); | 162 | extern void show_code(struct pt_regs *regs); |
163 | extern void print_fn_code(unsigned char *code, unsigned long len); | 163 | extern void print_fn_code(unsigned char *code, unsigned long len); |
164 | extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]); | 164 | extern int insn_to_mnemonic(unsigned char *instruction, char *buf, |
165 | unsigned int len); | ||
165 | 166 | ||
166 | unsigned long get_wchan(struct task_struct *p); | 167 | unsigned long get_wchan(struct task_struct *p); |
167 | #define task_pt_regs(tsk) ((struct pt_regs *) \ | 168 | #define task_pt_regs(tsk) ((struct pt_regs *) \ |
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 3ee5da3bc10c..559512a455da 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,9 +9,7 @@ | |||
9 | #include <uapi/asm/ptrace.h> | 9 | #include <uapi/asm/ptrace.h> |
10 | 10 | ||
11 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
12 | #ifndef __s390x__ | 12 | |
13 | #else /* __s390x__ */ | ||
14 | #endif /* __s390x__ */ | ||
15 | extern long psw_kernel_bits; | 13 | extern long psw_kernel_bits; |
16 | extern long psw_user_bits; | 14 | extern long psw_user_bits; |
17 | 15 | ||
@@ -77,8 +75,6 @@ struct per_struct_kernel { | |||
77 | #define PER_CONTROL_SUSPENSION 0x00400000UL | 75 | #define PER_CONTROL_SUSPENSION 0x00400000UL |
78 | #define PER_CONTROL_ALTERATION 0x00200000UL | 76 | #define PER_CONTROL_ALTERATION 0x00200000UL |
79 | 77 | ||
80 | #ifdef __s390x__ | ||
81 | #endif /* __s390x__ */ | ||
82 | /* | 78 | /* |
83 | * These are defined as per linux/ptrace.h, which see. | 79 | * These are defined as per linux/ptrace.h, which see. |
84 | */ | 80 | */ |
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fe7b99759e12..cd29d2f4e4f3 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -23,6 +23,7 @@ | |||
23 | * type here is what we want [need] for both 32 bit and 64 bit systems. | 23 | * type here is what we want [need] for both 32 bit and 64 bit systems. |
24 | */ | 24 | */ |
25 | extern const unsigned int sys_call_table[]; | 25 | extern const unsigned int sys_call_table[]; |
26 | extern const unsigned int sys_call_table_emu[]; | ||
26 | 27 | ||
27 | static inline long syscall_get_nr(struct task_struct *task, | 28 | static inline long syscall_get_nr(struct task_struct *task, |
28 | struct pt_regs *regs) | 29 | struct pt_regs *regs) |
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 9e2cfe0349c3..eb5f64d26d06 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,13 +14,8 @@ | |||
14 | #define THREAD_ORDER 1 | 14 | #define THREAD_ORDER 1 |
15 | #define ASYNC_ORDER 1 | 15 | #define ASYNC_ORDER 1 |
16 | #else /* CONFIG_64BIT */ | 16 | #else /* CONFIG_64BIT */ |
17 | #ifndef __SMALL_STACK | ||
18 | #define THREAD_ORDER 2 | 17 | #define THREAD_ORDER 2 |
19 | #define ASYNC_ORDER 2 | 18 | #define ASYNC_ORDER 2 |
20 | #else | ||
21 | #define THREAD_ORDER 1 | ||
22 | #define ASYNC_ORDER 1 | ||
23 | #endif | ||
24 | #endif /* CONFIG_64BIT */ | 19 | #endif /* CONFIG_64BIT */ |
25 | 20 | ||
26 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) | 21 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) |
@@ -41,6 +36,7 @@ struct thread_info { | |||
41 | struct task_struct *task; /* main task structure */ | 36 | struct task_struct *task; /* main task structure */ |
42 | struct exec_domain *exec_domain; /* execution domain */ | 37 | struct exec_domain *exec_domain; /* execution domain */ |
43 | unsigned long flags; /* low level flags */ | 38 | unsigned long flags; /* low level flags */ |
39 | unsigned long sys_call_table; /* System call table address */ | ||
44 | unsigned int cpu; /* current CPU */ | 40 | unsigned int cpu; /* current CPU */ |
45 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 41 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
46 | struct restart_block restart_block; | 42 | struct restart_block restart_block; |
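Together with the sys_call_table_emu declaration added to syscall.h above, the new thread_info field lets the system-call entry code load the per-task system call table with a single __TI_sysc_table access instead of testing _TIF_31BIT on every syscall (see the entry.S/entry64.S hunks further down). The hunk that initializes the field is not part of this excerpt; a minimal sketch of how it would presumably be set, assuming a compat flag is available for the task:

/* Hypothetical helper, not taken from this patch: pick the 64-bit or the
 * 31-bit (compat) table for a freshly created thread_info. */
static void init_sys_call_table(struct thread_info *ti, int is_31bit_task)
{
	ti->sys_call_table = (unsigned long) sys_call_table;
#ifdef CONFIG_COMPAT
	if (is_31bit_task)
		ti->sys_call_table = (unsigned long) sys_call_table_emu;
#endif
}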
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index a5ca214b34fd..3aa9f1ec5b29 100644 --- a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h | |||
@@ -215,12 +215,6 @@ typedef struct | |||
215 | unsigned long addr; | 215 | unsigned long addr; |
216 | } __attribute__ ((aligned(8))) psw_t; | 216 | } __attribute__ ((aligned(8))) psw_t; |
217 | 217 | ||
218 | typedef struct | ||
219 | { | ||
220 | __u32 mask; | ||
221 | __u32 addr; | ||
222 | } __attribute__ ((aligned(8))) psw_compat_t; | ||
223 | |||
224 | #ifndef __s390x__ | 218 | #ifndef __s390x__ |
225 | 219 | ||
226 | #define PSW_MASK_PER 0x40000000UL | 220 | #define PSW_MASK_PER 0x40000000UL |
@@ -295,20 +289,6 @@ typedef struct | |||
295 | unsigned long orig_gpr2; | 289 | unsigned long orig_gpr2; |
296 | } s390_regs; | 290 | } s390_regs; |
297 | 291 | ||
298 | typedef struct | ||
299 | { | ||
300 | psw_compat_t psw; | ||
301 | __u32 gprs[NUM_GPRS]; | ||
302 | __u32 acrs[NUM_ACRS]; | ||
303 | __u32 orig_gpr2; | ||
304 | } s390_compat_regs; | ||
305 | |||
306 | typedef struct | ||
307 | { | ||
308 | __u32 gprs_high[NUM_GPRS]; | ||
309 | } s390_compat_regs_high; | ||
310 | |||
311 | |||
312 | /* | 292 | /* |
313 | * Now for the user space program event recording (trace) definitions. | 293 | * Now for the user space program event recording (trace) definitions. |
314 | * The following structures are used only for the ptrace interface, don't | 294 | * The following structures are used only for the ptrace interface, don't |
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h index 5acca0a34c20..a61d538756f2 100644 --- a/arch/s390/include/uapi/asm/statfs.h +++ b/arch/s390/include/uapi/asm/statfs.h | |||
@@ -7,9 +7,6 @@ | |||
7 | #ifndef _S390_STATFS_H | 7 | #ifndef _S390_STATFS_H |
8 | #define _S390_STATFS_H | 8 | #define _S390_STATFS_H |
9 | 9 | ||
10 | #ifndef __s390x__ | ||
11 | #include <asm-generic/statfs.h> | ||
12 | #else | ||
13 | /* | 10 | /* |
14 | * We can't use <asm-generic/statfs.h> because in 64-bit mode | 11 | * We can't use <asm-generic/statfs.h> because in 64-bit mode |
15 | * we mix ints of different sizes in our struct statfs. | 12 | * we mix ints of different sizes in our struct statfs. |
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t; | |||
21 | #endif | 18 | #endif |
22 | 19 | ||
23 | struct statfs { | 20 | struct statfs { |
24 | int f_type; | 21 | unsigned int f_type; |
25 | int f_bsize; | 22 | unsigned int f_bsize; |
26 | long f_blocks; | 23 | unsigned long f_blocks; |
27 | long f_bfree; | 24 | unsigned long f_bfree; |
28 | long f_bavail; | 25 | unsigned long f_bavail; |
29 | long f_files; | 26 | unsigned long f_files; |
30 | long f_ffree; | 27 | unsigned long f_ffree; |
31 | __kernel_fsid_t f_fsid; | 28 | __kernel_fsid_t f_fsid; |
32 | int f_namelen; | 29 | unsigned int f_namelen; |
33 | int f_frsize; | 30 | unsigned int f_frsize; |
34 | int f_flags; | 31 | unsigned int f_flags; |
35 | int f_spare[4]; | 32 | unsigned int f_spare[4]; |
36 | }; | 33 | }; |
37 | 34 | ||
38 | struct statfs64 { | 35 | struct statfs64 { |
39 | int f_type; | 36 | unsigned int f_type; |
40 | int f_bsize; | 37 | unsigned int f_bsize; |
41 | long f_blocks; | 38 | unsigned long f_blocks; |
42 | long f_bfree; | 39 | unsigned long f_bfree; |
43 | long f_bavail; | 40 | unsigned long f_bavail; |
44 | long f_files; | 41 | unsigned long f_files; |
45 | long f_ffree; | 42 | unsigned long f_ffree; |
46 | __kernel_fsid_t f_fsid; | 43 | __kernel_fsid_t f_fsid; |
47 | int f_namelen; | 44 | unsigned int f_namelen; |
48 | int f_frsize; | 45 | unsigned int f_frsize; |
49 | int f_flags; | 46 | unsigned int f_flags; |
50 | int f_spare[4]; | 47 | unsigned int f_spare[4]; |
51 | }; | 48 | }; |
52 | 49 | ||
53 | struct compat_statfs64 { | ||
54 | __u32 f_type; | ||
55 | __u32 f_bsize; | ||
56 | __u64 f_blocks; | ||
57 | __u64 f_bfree; | ||
58 | __u64 f_bavail; | ||
59 | __u64 f_files; | ||
60 | __u64 f_ffree; | ||
61 | __kernel_fsid_t f_fsid; | ||
62 | __u32 f_namelen; | ||
63 | __u32 f_frsize; | ||
64 | __u32 f_flags; | ||
65 | __u32 f_spare[4]; | ||
66 | }; | ||
67 | |||
68 | #endif /* __s390x__ */ | ||
69 | #endif | 50 | #endif |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 2ac311ef5c9b..1386fcaf4ef6 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -14,16 +14,25 @@ endif | |||
14 | CFLAGS_smp.o := -Wno-nonnull | 14 | CFLAGS_smp.o := -Wno-nonnull |
15 | 15 | ||
16 | # | 16 | # |
17 | # Disable tailcall optimizations for stack / callchain walking functions | ||
18 | # since this might generate broken code when accessing register 15 and | ||
19 | # passing its content to other functions. | ||
20 | # | ||
21 | CFLAGS_stacktrace.o += -fno-optimize-sibling-calls | ||
22 | CFLAGS_dumpstack.o += -fno-optimize-sibling-calls | ||
23 | |||
24 | # | ||
17 | # Pass UTS_MACHINE for user_regset definition | 25 | # Pass UTS_MACHINE for user_regset definition |
18 | # | 26 | # |
19 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' | 27 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' |
20 | 28 | ||
21 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | 29 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w |
22 | 30 | ||
23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ | 31 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o |
24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ | 32 | obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o |
25 | debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ | 33 | obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o |
26 | sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o | 34 | obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o |
35 | obj-y += dumpstack.o | ||
27 | 36 | ||
28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 37 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 38 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index fface87056eb..7a82f9f70100 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -35,6 +35,7 @@ int main(void) | |||
35 | DEFINE(__TI_task, offsetof(struct thread_info, task)); | 35 | DEFINE(__TI_task, offsetof(struct thread_info, task)); |
36 | DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); | 36 | DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); |
37 | DEFINE(__TI_flags, offsetof(struct thread_info, flags)); | 37 | DEFINE(__TI_flags, offsetof(struct thread_info, flags)); |
38 | DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table)); | ||
38 | DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); | 39 | DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); |
39 | DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); | 40 | DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); |
40 | DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); | 41 | DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); |
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 6de049fbe62d..c439ac9ced09 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
362 | /* set extra registers only for synchronous signals */ | 362 | /* set extra registers only for synchronous signals */ |
363 | regs->gprs[4] = regs->int_code & 127; | 363 | regs->gprs[4] = regs->int_code & 127; |
364 | regs->gprs[5] = regs->int_parm_long; | 364 | regs->gprs[5] = regs->int_parm_long; |
365 | regs->gprs[6] = task_thread_info(current)->last_break; | ||
365 | } | 366 | } |
366 | 367 | ||
367 | /* Place signal number on stack to allow backtrace from handler. */ | 368 | /* Place signal number on stack to allow backtrace from handler. */ |
@@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
421 | regs->gprs[2] = map_signal(sig); | 422 | regs->gprs[2] = map_signal(sig); |
422 | regs->gprs[3] = (__force __u64) &frame->info; | 423 | regs->gprs[3] = (__force __u64) &frame->info; |
423 | regs->gprs[4] = (__force __u64) &frame->uc; | 424 | regs->gprs[4] = (__force __u64) &frame->uc; |
425 | regs->gprs[5] = task_thread_info(current)->last_break; | ||
424 | return 0; | 426 | return 0; |
425 | 427 | ||
426 | give_sigsegv: | 428 | give_sigsegv: |
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 3ad5e9540160..7f4a4a8c847c 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c | |||
@@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code) | |||
1696 | * insn_to_mnemonic - decode an s390 instruction | 1696 | * insn_to_mnemonic - decode an s390 instruction |
1697 | * @instruction: instruction to decode | 1697 | * @instruction: instruction to decode |
1698 | * @buf: buffer to fill with mnemonic | 1698 | * @buf: buffer to fill with mnemonic |
1699 | * @len: length of buffer | ||
1699 | * | 1700 | * |
1700 | * Decode the instruction at @instruction and store the corresponding | 1701 | * Decode the instruction at @instruction and store the corresponding |
1701 | * mnemonic into @buf. | 1702 | * mnemonic into @buf of length @len. |
1702 | * @buf is left unchanged if the instruction could not be decoded. | 1703 | * @buf is left unchanged if the instruction could not be decoded. |
1703 | * Returns: | 1704 | * Returns: |
1704 | * %0 on success, %-ENOENT if the instruction was not found. | 1705 | * %0 on success, %-ENOENT if the instruction was not found. |
1705 | */ | 1706 | */ |
1706 | int insn_to_mnemonic(unsigned char *instruction, char buf[8]) | 1707 | int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len) |
1707 | { | 1708 | { |
1708 | struct insn *insn; | 1709 | struct insn *insn; |
1709 | 1710 | ||
@@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8]) | |||
1711 | if (!insn) | 1712 | if (!insn) |
1712 | return -ENOENT; | 1713 | return -ENOENT; |
1713 | if (insn->name[0] == '\0') | 1714 | if (insn->name[0] == '\0') |
1714 | snprintf(buf, 8, "%s", | 1715 | snprintf(buf, len, "%s", |
1715 | long_insn_name[(int) insn->name[1]]); | 1716 | long_insn_name[(int) insn->name[1]]); |
1716 | else | 1717 | else |
1717 | snprintf(buf, 8, "%.5s", insn->name); | 1718 | snprintf(buf, len, "%.5s", insn->name); |
1718 | return 0; | 1719 | return 0; |
1719 | } | 1720 | } |
1720 | EXPORT_SYMBOL_GPL(insn_to_mnemonic); | 1721 | EXPORT_SYMBOL_GPL(insn_to_mnemonic); |
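With the new prototype a caller supplies the buffer length explicitly instead of relying on the old fixed char buf[8]. An illustrative call site (not taken from this patch; "code" stands for a pointer to the instruction bytes):

	char mnemonic[16];	/* any size, the length is now passed explicitly */

	if (insn_to_mnemonic(code, mnemonic, sizeof(mnemonic)) == -ENOENT)
		snprintf(mnemonic, sizeof(mnemonic), "unknown");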
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c new file mode 100644 index 000000000000..03dce39d01ee --- /dev/null +++ b/arch/s390/kernel/dumpstack.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * Stack dumping functions | ||
3 | * | ||
4 | * Copyright IBM Corp. 1999, 2013 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kallsyms.h> | ||
8 | #include <linux/hardirq.h> | ||
9 | #include <linux/kprobes.h> | ||
10 | #include <linux/utsname.h> | ||
11 | #include <linux/export.h> | ||
12 | #include <linux/kdebug.h> | ||
13 | #include <linux/ptrace.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/debug.h> | ||
18 | #include <asm/ipl.h> | ||
19 | |||
20 | #ifndef CONFIG_64BIT | ||
21 | #define LONG "%08lx " | ||
22 | #define FOURLONG "%08lx %08lx %08lx %08lx\n" | ||
23 | static int kstack_depth_to_print = 12; | ||
24 | #else /* CONFIG_64BIT */ | ||
25 | #define LONG "%016lx " | ||
26 | #define FOURLONG "%016lx %016lx %016lx %016lx\n" | ||
27 | static int kstack_depth_to_print = 20; | ||
28 | #endif /* CONFIG_64BIT */ | ||
29 | |||
30 | /* | ||
31 | * For show_trace we have three different stacks to consider: | ||
32 | * - the panic stack which is used if the kernel stack has overflowed | ||
33 | * - the asynchronous interrupt stack (cpu related) | ||
34 | * - the synchronous kernel stack (process related) | ||
35 | * The stack trace can start at any of the three stacks and can potentially | ||
36 | * touch all of them. The order is: panic stack, async stack, sync stack. | ||
37 | */ | ||
38 | static unsigned long | ||
39 | __show_trace(unsigned long sp, unsigned long low, unsigned long high) | ||
40 | { | ||
41 | struct stack_frame *sf; | ||
42 | struct pt_regs *regs; | ||
43 | |||
44 | while (1) { | ||
45 | sp = sp & PSW_ADDR_INSN; | ||
46 | if (sp < low || sp > high - sizeof(*sf)) | ||
47 | return sp; | ||
48 | sf = (struct stack_frame *) sp; | ||
49 | printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); | ||
50 | print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); | ||
51 | /* Follow the backchain. */ | ||
52 | while (1) { | ||
53 | low = sp; | ||
54 | sp = sf->back_chain & PSW_ADDR_INSN; | ||
55 | if (!sp) | ||
56 | break; | ||
57 | if (sp <= low || sp > high - sizeof(*sf)) | ||
58 | return sp; | ||
59 | sf = (struct stack_frame *) sp; | ||
60 | printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); | ||
61 | print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); | ||
62 | } | ||
63 | /* Zero backchain detected, check for interrupt frame. */ | ||
64 | sp = (unsigned long) (sf + 1); | ||
65 | if (sp <= low || sp > high - sizeof(*regs)) | ||
66 | return sp; | ||
67 | regs = (struct pt_regs *) sp; | ||
68 | printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); | ||
69 | print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); | ||
70 | low = sp; | ||
71 | sp = regs->gprs[15]; | ||
72 | } | ||
73 | } | ||
74 | |||
75 | static void show_trace(struct task_struct *task, unsigned long *stack) | ||
76 | { | ||
77 | register unsigned long __r15 asm ("15"); | ||
78 | unsigned long sp; | ||
79 | |||
80 | sp = (unsigned long) stack; | ||
81 | if (!sp) | ||
82 | sp = task ? task->thread.ksp : __r15; | ||
83 | printk("Call Trace:\n"); | ||
84 | #ifdef CONFIG_CHECK_STACK | ||
85 | sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, | ||
86 | S390_lowcore.panic_stack); | ||
87 | #endif | ||
88 | sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, | ||
89 | S390_lowcore.async_stack); | ||
90 | if (task) | ||
91 | __show_trace(sp, (unsigned long) task_stack_page(task), | ||
92 | (unsigned long) task_stack_page(task) + THREAD_SIZE); | ||
93 | else | ||
94 | __show_trace(sp, S390_lowcore.thread_info, | ||
95 | S390_lowcore.thread_info + THREAD_SIZE); | ||
96 | if (!task) | ||
97 | task = current; | ||
98 | debug_show_held_locks(task); | ||
99 | } | ||
100 | |||
101 | void show_stack(struct task_struct *task, unsigned long *sp) | ||
102 | { | ||
103 | register unsigned long *__r15 asm ("15"); | ||
104 | unsigned long *stack; | ||
105 | int i; | ||
106 | |||
107 | if (!sp) | ||
108 | stack = task ? (unsigned long *) task->thread.ksp : __r15; | ||
109 | else | ||
110 | stack = sp; | ||
111 | |||
112 | for (i = 0; i < kstack_depth_to_print; i++) { | ||
113 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) | ||
114 | break; | ||
115 | if ((i * sizeof(long) % 32) == 0) | ||
116 | printk("%s ", i == 0 ? "" : "\n"); | ||
117 | printk(LONG, *stack++); | ||
118 | } | ||
119 | printk("\n"); | ||
120 | show_trace(task, sp); | ||
121 | } | ||
122 | |||
123 | static void show_last_breaking_event(struct pt_regs *regs) | ||
124 | { | ||
125 | #ifdef CONFIG_64BIT | ||
126 | printk("Last Breaking-Event-Address:\n"); | ||
127 | printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); | ||
128 | print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); | ||
129 | #endif | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * The architecture-independent dump_stack generator | ||
134 | */ | ||
135 | void dump_stack(void) | ||
136 | { | ||
137 | printk("CPU: %d %s %s %.*s\n", | ||
138 | task_thread_info(current)->cpu, print_tainted(), | ||
139 | init_utsname()->release, | ||
140 | (int)strcspn(init_utsname()->version, " "), | ||
141 | init_utsname()->version); | ||
142 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | ||
143 | current->comm, current->pid, current, | ||
144 | (void *) current->thread.ksp); | ||
145 | show_stack(NULL, NULL); | ||
146 | } | ||
147 | EXPORT_SYMBOL(dump_stack); | ||
148 | |||
149 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) | ||
150 | { | ||
151 | return (regs->psw.mask & bits) / ((~bits + 1) & bits); | ||
152 | } | ||
153 | |||
154 | void show_registers(struct pt_regs *regs) | ||
155 | { | ||
156 | char *mode; | ||
157 | |||
158 | mode = user_mode(regs) ? "User" : "Krnl"; | ||
159 | printk("%s PSW : %p %p", | ||
160 | mode, (void *) regs->psw.mask, | ||
161 | (void *) regs->psw.addr); | ||
162 | print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); | ||
163 | printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " | ||
164 | "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), | ||
165 | mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), | ||
166 | mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), | ||
167 | mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), | ||
168 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), | ||
169 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); | ||
170 | #ifdef CONFIG_64BIT | ||
171 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); | ||
172 | #endif | ||
173 | printk("\n%s GPRS: " FOURLONG, mode, | ||
174 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | ||
175 | printk(" " FOURLONG, | ||
176 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); | ||
177 | printk(" " FOURLONG, | ||
178 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); | ||
179 | printk(" " FOURLONG, | ||
180 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); | ||
181 | show_code(regs); | ||
182 | } | ||
183 | |||
184 | void show_regs(struct pt_regs *regs) | ||
185 | { | ||
186 | printk("CPU: %d %s %s %.*s\n", | ||
187 | task_thread_info(current)->cpu, print_tainted(), | ||
188 | init_utsname()->release, | ||
189 | (int)strcspn(init_utsname()->version, " "), | ||
190 | init_utsname()->version); | ||
191 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | ||
192 | current->comm, current->pid, current, | ||
193 | (void *) current->thread.ksp); | ||
194 | show_registers(regs); | ||
195 | /* Show stack backtrace if pt_regs is from kernel mode */ | ||
196 | if (!user_mode(regs)) | ||
197 | show_trace(NULL, (unsigned long *) regs->gprs[15]); | ||
198 | show_last_breaking_event(regs); | ||
199 | } | ||
200 | |||
201 | static DEFINE_SPINLOCK(die_lock); | ||
202 | |||
203 | void die(struct pt_regs *regs, const char *str) | ||
204 | { | ||
205 | static int die_counter; | ||
206 | |||
207 | oops_enter(); | ||
208 | lgr_info_log(); | ||
209 | debug_stop_all(); | ||
210 | console_verbose(); | ||
211 | spin_lock_irq(&die_lock); | ||
212 | bust_spinlocks(1); | ||
213 | printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); | ||
214 | #ifdef CONFIG_PREEMPT | ||
215 | printk("PREEMPT "); | ||
216 | #endif | ||
217 | #ifdef CONFIG_SMP | ||
218 | printk("SMP "); | ||
219 | #endif | ||
220 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
221 | printk("DEBUG_PAGEALLOC"); | ||
222 | #endif | ||
223 | printk("\n"); | ||
224 | notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); | ||
225 | print_modules(); | ||
226 | show_regs(regs); | ||
227 | bust_spinlocks(0); | ||
228 | add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); | ||
229 | spin_unlock_irq(&die_lock); | ||
230 | if (in_interrupt()) | ||
231 | panic("Fatal exception in interrupt"); | ||
232 | if (panic_on_oops) | ||
233 | panic("Fatal exception: panic_on_oops"); | ||
234 | oops_exit(); | ||
235 | do_exit(SIGSEGV); | ||
236 | } | ||
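One piece of arithmetic above that is easy to misread is mask_bits(): ((~bits + 1) & bits) isolates the lowest set bit of the mask, so the division is just a right shift that moves the selected PSW field down to bit 0. A standalone illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x0000c00180000000UL;	/* made-up PSW mask value */
	unsigned long bits = 0x0000c00000000000UL;	/* made-up two-bit field */
	unsigned long lowest = (~bits + 1) & bits;	/* 0x0000400000000000 */

	/* (mask & bits) / lowest == 3: the field value shifted down to bit 0 */
	printf("field = %lu\n", (mask & bits) / lowest);
	return 0;
}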
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 94feff7d6132..4d5e6f8a7978 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | |||
45 | 45 | ||
46 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 46 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
47 | STACK_SIZE = 1 << STACK_SHIFT | 47 | STACK_SIZE = 1 << STACK_SHIFT |
48 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | ||
48 | 49 | ||
49 | #define BASED(name) name-system_call(%r13) | 50 | #define BASED(name) name-system_call(%r13) |
50 | 51 | ||
@@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
97 | sra %r14,\shift | 98 | sra %r14,\shift |
98 | jnz 1f | 99 | jnz 1f |
99 | CHECK_STACK 1<<\shift,\savearea | 100 | CHECK_STACK 1<<\shift,\savearea |
101 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
100 | j 2f | 102 | j 2f |
101 | 1: l %r15,\stack # load target stack | 103 | 1: l %r15,\stack # load target stack |
102 | 2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 104 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) |
103 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
104 | .endm | 105 | .endm |
105 | 106 | ||
106 | .macro ADD64 high,low,timer | 107 | .macro ADD64 high,low,timer |
@@ -150,7 +151,7 @@ ENTRY(__switch_to) | |||
150 | l %r4,__THREAD_info(%r2) # get thread_info of prev | 151 | l %r4,__THREAD_info(%r2) # get thread_info of prev |
151 | l %r5,__THREAD_info(%r3) # get thread_info of next | 152 | l %r5,__THREAD_info(%r3) # get thread_info of next |
152 | lr %r15,%r5 | 153 | lr %r15,%r5 |
153 | ahi %r15,STACK_SIZE # end of kernel stack of next | 154 | ahi %r15,STACK_INIT # end of kernel stack of next |
154 | st %r3,__LC_CURRENT # store task struct of next | 155 | st %r3,__LC_CURRENT # store task struct of next |
155 | st %r5,__LC_THREAD_INFO # store thread info of next | 156 | st %r5,__LC_THREAD_INFO # store thread info of next |
156 | st %r15,__LC_KERNEL_STACK # store end of kernel stack | 157 | st %r15,__LC_KERNEL_STACK # store end of kernel stack |
@@ -178,7 +179,6 @@ sysc_stm: | |||
178 | l %r13,__LC_SVC_NEW_PSW+4 | 179 | l %r13,__LC_SVC_NEW_PSW+4 |
179 | sysc_per: | 180 | sysc_per: |
180 | l %r15,__LC_KERNEL_STACK | 181 | l %r15,__LC_KERNEL_STACK |
181 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
182 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | 182 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs |
183 | sysc_vtime: | 183 | sysc_vtime: |
184 | UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER | 184 | UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER |
@@ -188,6 +188,7 @@ sysc_vtime: | |||
188 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | 188 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
189 | sysc_do_svc: | 189 | sysc_do_svc: |
190 | oi __TI_flags+3(%r12),_TIF_SYSCALL | 190 | oi __TI_flags+3(%r12),_TIF_SYSCALL |
191 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table | ||
191 | lh %r8,__PT_INT_CODE+2(%r11) | 192 | lh %r8,__PT_INT_CODE+2(%r11) |
192 | sla %r8,2 # shift and test for svc0 | 193 | sla %r8,2 # shift and test for svc0 |
193 | jnz sysc_nr_ok | 194 | jnz sysc_nr_ok |
@@ -198,7 +199,6 @@ sysc_do_svc: | |||
198 | lr %r8,%r1 | 199 | lr %r8,%r1 |
199 | sla %r8,2 | 200 | sla %r8,2 |
200 | sysc_nr_ok: | 201 | sysc_nr_ok: |
201 | l %r10,BASED(.Lsys_call_table) # 31 bit system call table | ||
202 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 202 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
203 | st %r2,__PT_ORIG_GPR2(%r11) | 203 | st %r2,__PT_ORIG_GPR2(%r11) |
204 | st %r7,STACK_FRAME_OVERHEAD(%r15) | 204 | st %r7,STACK_FRAME_OVERHEAD(%r15) |
@@ -359,11 +359,11 @@ ENTRY(pgm_check_handler) | |||
359 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 359 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
360 | jnz pgm_svcper # -> single stepped svc | 360 | jnz pgm_svcper # -> single stepped svc |
361 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC | 361 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
362 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
362 | j 2f | 363 | j 2f |
363 | 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER | 364 | 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER |
364 | l %r15,__LC_KERNEL_STACK | 365 | l %r15,__LC_KERNEL_STACK |
365 | 2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 366 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) |
366 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
367 | stm %r0,%r7,__PT_R0(%r11) | 367 | stm %r0,%r7,__PT_R0(%r11) |
368 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC | 368 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC |
369 | stm %r8,%r9,__PT_PSW(%r11) | 369 | stm %r8,%r9,__PT_PSW(%r11) |
@@ -485,7 +485,6 @@ io_work: | |||
485 | # | 485 | # |
486 | io_work_user: | 486 | io_work_user: |
487 | l %r1,__LC_KERNEL_STACK | 487 | l %r1,__LC_KERNEL_STACK |
488 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
489 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 488 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
490 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) | 489 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) |
491 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 490 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
@@ -646,7 +645,6 @@ mcck_skip: | |||
646 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 645 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
647 | jno mcck_return | 646 | jno mcck_return |
648 | l %r1,__LC_KERNEL_STACK # switch to kernel stack | 647 | l %r1,__LC_KERNEL_STACK # switch to kernel stack |
649 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
650 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 648 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
651 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) | 649 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) |
652 | la %r11,STACK_FRAME_OVERHEAD(%r15) | 650 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
@@ -674,6 +672,7 @@ mcck_panic: | |||
674 | sra %r14,PAGE_SHIFT | 672 | sra %r14,PAGE_SHIFT |
675 | jz 0f | 673 | jz 0f |
676 | l %r15,__LC_PANIC_STACK | 674 | l %r15,__LC_PANIC_STACK |
675 | j mcck_skip | ||
677 | 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 676 | 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
678 | j mcck_skip | 677 | j mcck_skip |
679 | 678 | ||
@@ -714,12 +713,10 @@ ENTRY(restart_int_handler) | |||
714 | */ | 713 | */ |
715 | stack_overflow: | 714 | stack_overflow: |
716 | l %r15,__LC_PANIC_STACK # change to panic stack | 715 | l %r15,__LC_PANIC_STACK # change to panic stack |
717 | ahi %r15,-__PT_SIZE # create pt_regs | 716 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
718 | stm %r0,%r7,__PT_R0(%r15) | 717 | stm %r0,%r7,__PT_R0(%r11) |
719 | stm %r8,%r9,__PT_PSW(%r15) | 718 | stm %r8,%r9,__PT_PSW(%r11) |
720 | mvc __PT_R8(32,%r11),0(%r14) | 719 | mvc __PT_R8(32,%r11),0(%r14) |
721 | lr %r15,%r11 | ||
722 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
723 | l %r1,BASED(1f) | 720 | l %r1,BASED(1f) |
724 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 721 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
725 | lr %r2,%r11 # pass pointer to pt_regs | 722 | lr %r2,%r11 # pass pointer to pt_regs |
@@ -799,15 +796,14 @@ cleanup_system_call: | |||
799 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 796 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
800 | # set up saved register 11 | 797 | # set up saved register 11 |
801 | l %r15,__LC_KERNEL_STACK | 798 | l %r15,__LC_KERNEL_STACK |
802 | ahi %r15,-__PT_SIZE | 799 | la %r9,STACK_FRAME_OVERHEAD(%r15) |
803 | st %r15,12(%r11) # r11 pt_regs pointer | 800 | st %r9,12(%r11) # r11 pt_regs pointer |
804 | # fill pt_regs | 801 | # fill pt_regs |
805 | mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC | 802 | mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC |
806 | stm %r0,%r7,__PT_R0(%r15) | 803 | stm %r0,%r7,__PT_R0(%r9) |
807 | mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW | 804 | mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW |
808 | mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC | 805 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC |
809 | # setup saved register 15 | 806 | # setup saved register 15 |
810 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
811 | st %r15,28(%r11) # r15 stack pointer | 807 | st %r15,28(%r11) # r15 stack pointer |
812 | # set new psw address and exit | 808 | # set new psw address and exit |
813 | l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 | 809 | l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 |
@@ -910,7 +906,6 @@ cleanup_idle_wait: | |||
910 | .Ltrace_enter: .long do_syscall_trace_enter | 906 | .Ltrace_enter: .long do_syscall_trace_enter |
911 | .Ltrace_exit: .long do_syscall_trace_exit | 907 | .Ltrace_exit: .long do_syscall_trace_exit |
912 | .Lschedule_tail: .long schedule_tail | 908 | .Lschedule_tail: .long schedule_tail |
913 | .Lsys_call_table: .long sys_call_table | ||
914 | .Lsysc_per: .long sysc_per + 0x80000000 | 909 | .Lsysc_per: .long sysc_per + 0x80000000 |
915 | #ifdef CONFIG_TRACE_IRQFLAGS | 910 | #ifdef CONFIG_TRACE_IRQFLAGS |
916 | .Lhardirqs_on: .long trace_hardirqs_on_caller | 911 | .Lhardirqs_on: .long trace_hardirqs_on_caller |
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index c3a736a3ed44..aa0ab02e9595 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <asm/cputime.h> | 7 | #include <asm/cputime.h> |
8 | 8 | ||
9 | extern void *restart_stack; | 9 | extern void *restart_stack; |
10 | extern unsigned long suspend_zero_pages; | ||
10 | 11 | ||
11 | void system_call(void); | 12 | void system_call(void); |
12 | void pgm_check_handler(void); | 13 | void pgm_check_handler(void); |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 2e6d60c55f90..4c17eece707e 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120 | |||
39 | 39 | ||
40 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 40 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
41 | STACK_SIZE = 1 << STACK_SHIFT | 41 | STACK_SIZE = 1 << STACK_SHIFT |
42 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | ||
42 | 43 | ||
43 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 44 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
44 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) | 45 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
@@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) | |||
124 | srag %r14,%r14,\shift | 125 | srag %r14,%r14,\shift |
125 | jnz 1f | 126 | jnz 1f |
126 | CHECK_STACK 1<<\shift,\savearea | 127 | CHECK_STACK 1<<\shift,\savearea |
128 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
127 | j 2f | 129 | j 2f |
128 | 1: lg %r15,\stack # load target stack | 130 | 1: lg %r15,\stack # load target stack |
129 | 2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 131 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) |
130 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
131 | .endm | 132 | .endm |
132 | 133 | ||
133 | .macro UPDATE_VTIME scratch,enter_timer | 134 | .macro UPDATE_VTIME scratch,enter_timer |
@@ -177,7 +178,7 @@ ENTRY(__switch_to) | |||
177 | lg %r4,__THREAD_info(%r2) # get thread_info of prev | 178 | lg %r4,__THREAD_info(%r2) # get thread_info of prev |
178 | lg %r5,__THREAD_info(%r3) # get thread_info of next | 179 | lg %r5,__THREAD_info(%r3) # get thread_info of next |
179 | lgr %r15,%r5 | 180 | lgr %r15,%r5 |
180 | aghi %r15,STACK_SIZE # end of kernel stack of next | 181 | aghi %r15,STACK_INIT # end of kernel stack of next |
181 | stg %r3,__LC_CURRENT # store task struct of next | 182 | stg %r3,__LC_CURRENT # store task struct of next |
182 | stg %r5,__LC_THREAD_INFO # store thread info of next | 183 | stg %r5,__LC_THREAD_INFO # store thread info of next |
183 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack | 184 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack |
@@ -203,10 +204,8 @@ sysc_stmg: | |||
203 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC | 204 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
204 | lg %r10,__LC_LAST_BREAK | 205 | lg %r10,__LC_LAST_BREAK |
205 | lg %r12,__LC_THREAD_INFO | 206 | lg %r12,__LC_THREAD_INFO |
206 | larl %r13,system_call | ||
207 | sysc_per: | 207 | sysc_per: |
208 | lg %r15,__LC_KERNEL_STACK | 208 | lg %r15,__LC_KERNEL_STACK |
209 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
210 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | 209 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs |
211 | sysc_vtime: | 210 | sysc_vtime: |
212 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER | 211 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER |
@@ -217,6 +216,7 @@ sysc_vtime: | |||
217 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | 216 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
218 | sysc_do_svc: | 217 | sysc_do_svc: |
219 | oi __TI_flags+7(%r12),_TIF_SYSCALL | 218 | oi __TI_flags+7(%r12),_TIF_SYSCALL |
219 | lg %r10,__TI_sysc_table(%r12) # address of system call table | ||
220 | llgh %r8,__PT_INT_CODE+2(%r11) | 220 | llgh %r8,__PT_INT_CODE+2(%r11) |
221 | slag %r8,%r8,2 # shift and test for svc 0 | 221 | slag %r8,%r8,2 # shift and test for svc 0 |
222 | jnz sysc_nr_ok | 222 | jnz sysc_nr_ok |
@@ -227,13 +227,6 @@ sysc_do_svc: | |||
227 | sth %r1,__PT_INT_CODE+2(%r11) | 227 | sth %r1,__PT_INT_CODE+2(%r11) |
228 | slag %r8,%r1,2 | 228 | slag %r8,%r1,2 |
229 | sysc_nr_ok: | 229 | sysc_nr_ok: |
230 | larl %r10,sys_call_table # 64 bit system call table | ||
231 | #ifdef CONFIG_COMPAT | ||
232 | tm __TI_flags+5(%r12),(_TIF_31BIT>>16) | ||
233 | jno sysc_noemu | ||
234 | larl %r10,sys_call_table_emu # 31 bit system call table | ||
235 | sysc_noemu: | ||
236 | #endif | ||
237 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 230 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
238 | stg %r2,__PT_ORIG_GPR2(%r11) | 231 | stg %r2,__PT_ORIG_GPR2(%r11) |
239 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | 232 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
@@ -389,6 +382,7 @@ ENTRY(pgm_check_handler) | |||
389 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 382 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
390 | jnz pgm_svcper # -> single stepped svc | 383 | jnz pgm_svcper # -> single stepped svc |
391 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC | 384 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
385 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
392 | j 2f | 386 | j 2f |
393 | 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER | 387 | 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER |
394 | LAST_BREAK %r14 | 388 | LAST_BREAK %r14 |
@@ -398,8 +392,7 @@ ENTRY(pgm_check_handler) | |||
398 | tm __LC_PGM_ILC+2,0x02 # check for transaction abort | 392 | tm __LC_PGM_ILC+2,0x02 # check for transaction abort |
399 | jz 2f | 393 | jz 2f |
400 | mvc __THREAD_trap_tdb(256,%r14),0(%r13) | 394 | mvc __THREAD_trap_tdb(256,%r14),0(%r13) |
401 | 2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 395 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) |
402 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
403 | stmg %r0,%r7,__PT_R0(%r11) | 396 | stmg %r0,%r7,__PT_R0(%r11) |
404 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC | 397 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
405 | stmg %r8,%r9,__PT_PSW(%r11) | 398 | stmg %r8,%r9,__PT_PSW(%r11) |
@@ -526,7 +519,6 @@ io_work: | |||
526 | # | 519 | # |
527 | io_work_user: | 520 | io_work_user: |
528 | lg %r1,__LC_KERNEL_STACK | 521 | lg %r1,__LC_KERNEL_STACK |
529 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
530 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 522 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
531 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | 523 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
532 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 524 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
@@ -688,7 +680,6 @@ mcck_skip: | |||
688 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 680 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
689 | jno mcck_return | 681 | jno mcck_return |
690 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack | 682 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack |
691 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
692 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 683 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
693 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | 684 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
694 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 685 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
@@ -755,14 +746,12 @@ ENTRY(restart_int_handler) | |||
755 | * Setup a pt_regs so that show_trace can provide a good call trace. | 746 | * Setup a pt_regs so that show_trace can provide a good call trace. |
756 | */ | 747 | */ |
757 | stack_overflow: | 748 | stack_overflow: |
758 | lg %r11,__LC_PANIC_STACK # change to panic stack | 749 | lg %r15,__LC_PANIC_STACK # change to panic stack |
759 | aghi %r11,-__PT_SIZE # create pt_regs | 750 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
760 | stmg %r0,%r7,__PT_R0(%r11) | 751 | stmg %r0,%r7,__PT_R0(%r11) |
761 | stmg %r8,%r9,__PT_PSW(%r11) | 752 | stmg %r8,%r9,__PT_PSW(%r11) |
762 | mvc __PT_R8(64,%r11),0(%r14) | 753 | mvc __PT_R8(64,%r11),0(%r14) |
763 | stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 | 754 | stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 |
764 | lgr %r15,%r11 | ||
765 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
766 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 755 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
767 | lgr %r2,%r11 # pass pointer to pt_regs | 756 | lgr %r2,%r11 # pass pointer to pt_regs |
768 | jg kernel_stack_overflow | 757 | jg kernel_stack_overflow |
@@ -846,15 +835,14 @@ cleanup_system_call: | |||
846 | mvc __TI_last_break(8,%r12),16(%r11) | 835 | mvc __TI_last_break(8,%r12),16(%r11) |
847 | 0: # set up saved register r11 | 836 | 0: # set up saved register r11 |
848 | lg %r15,__LC_KERNEL_STACK | 837 | lg %r15,__LC_KERNEL_STACK |
849 | aghi %r15,-__PT_SIZE | 838 | la %r9,STACK_FRAME_OVERHEAD(%r15) |
850 | stg %r15,24(%r11) # r11 pt_regs pointer | 839 | stg %r9,24(%r11) # r11 pt_regs pointer |
851 | # fill pt_regs | 840 | # fill pt_regs |
852 | mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC | 841 | mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC |
853 | stmg %r0,%r7,__PT_R0(%r15) | 842 | stmg %r0,%r7,__PT_R0(%r9) |
854 | mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW | 843 | mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW |
855 | mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC | 844 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC |
856 | # setup saved register r15 | 845 | # setup saved register r15 |
857 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
858 | stg %r15,56(%r11) # r15 stack pointer | 846 | stg %r15,56(%r11) # r15 stack pointer |
859 | # set new psw address and exit | 847 | # set new psw address and exit |
860 | larl %r9,sysc_do_svc | 848 | larl %r9,sysc_do_svc |
@@ -1011,6 +999,7 @@ sys_call_table: | |||
1011 | #ifdef CONFIG_COMPAT | 999 | #ifdef CONFIG_COMPAT |
1012 | 1000 | ||
1013 | #define SYSCALL(esa,esame,emu) .long emu | 1001 | #define SYSCALL(esa,esame,emu) .long emu |
1002 | .globl sys_call_table_emu | ||
1014 | sys_call_table_emu: | 1003 | sys_call_table_emu: |
1015 | #include "syscalls.S" | 1004 | #include "syscalls.S" |
1016 | #undef SYSCALL | 1005 | #undef SYSCALL |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b3de27700016..ac2178161ec3 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/reboot.h> | 13 | #include <linux/reboot.h> |
14 | #include <linux/ftrace.h> | 14 | #include <linux/ftrace.h> |
15 | #include <linux/debug_locks.h> | 15 | #include <linux/debug_locks.h> |
16 | #include <linux/suspend.h> | ||
16 | #include <asm/cio.h> | 17 | #include <asm/cio.h> |
17 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
18 | #include <asm/pgtable.h> | 19 | #include <asm/pgtable.h> |
@@ -67,6 +68,35 @@ void setup_regs(void) | |||
67 | memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); | 68 | memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); |
68 | } | 69 | } |
69 | 70 | ||
71 | /* | ||
72 | * PM notifier callback for kdump | ||
73 | */ | ||
74 | static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action, | ||
75 | void *ptr) | ||
76 | { | ||
77 | switch (action) { | ||
78 | case PM_SUSPEND_PREPARE: | ||
79 | case PM_HIBERNATION_PREPARE: | ||
80 | if (crashk_res.start) | ||
81 | crash_map_reserved_pages(); | ||
82 | break; | ||
83 | case PM_POST_SUSPEND: | ||
84 | case PM_POST_HIBERNATION: | ||
85 | if (crashk_res.start) | ||
86 | crash_unmap_reserved_pages(); | ||
87 | break; | ||
88 | default: | ||
89 | return NOTIFY_DONE; | ||
90 | } | ||
91 | return NOTIFY_OK; | ||
92 | } | ||
93 | |||
94 | static int __init machine_kdump_pm_init(void) | ||
95 | { | ||
96 | pm_notifier(machine_kdump_pm_cb, 0); | ||
97 | return 0; | ||
98 | } | ||
99 | arch_initcall(machine_kdump_pm_init); | ||
70 | #endif | 100 | #endif |
71 | 101 | ||
72 | /* | 102 | /* |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 29268859d8ee..0f419c5765c8 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -377,11 +377,14 @@ static void __init setup_lowcore(void) | |||
377 | PSW_MASK_DAT | PSW_MASK_MCHECK; | 377 | PSW_MASK_DAT | PSW_MASK_MCHECK; |
378 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 378 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
379 | lc->clock_comparator = -1ULL; | 379 | lc->clock_comparator = -1ULL; |
380 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | 380 | lc->kernel_stack = ((unsigned long) &init_thread_union) |
381 | + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); | ||
381 | lc->async_stack = (unsigned long) | 382 | lc->async_stack = (unsigned long) |
382 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; | 383 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) |
384 | + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); | ||
383 | lc->panic_stack = (unsigned long) | 385 | lc->panic_stack = (unsigned long) |
384 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; | 386 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) |
387 | + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); | ||
385 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; | 388 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; |
386 | lc->thread_info = (unsigned long) &init_thread_union; | 389 | lc->thread_info = (unsigned long) &init_thread_union; |
387 | lc->machine_flags = S390_lowcore.machine_flags; | 390 | lc->machine_flags = S390_lowcore.machine_flags; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6ff5845679e6..8074cb4b7cbf 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) | |||
181 | lc = pcpu->lowcore; | 181 | lc = pcpu->lowcore; |
182 | memcpy(lc, &S390_lowcore, 512); | 182 | memcpy(lc, &S390_lowcore, 512); |
183 | memset((char *) lc + 512, 0, sizeof(*lc) - 512); | 183 | memset((char *) lc + 512, 0, sizeof(*lc) - 512); |
184 | lc->async_stack = pcpu->async_stack + ASYNC_SIZE; | 184 | lc->async_stack = pcpu->async_stack + ASYNC_SIZE |
185 | lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; | 185 | - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); |
186 | lc->panic_stack = pcpu->panic_stack + PAGE_SIZE | ||
187 | - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); | ||
186 | lc->cpu_nr = cpu; | 188 | lc->cpu_nr = cpu; |
187 | #ifndef CONFIG_64BIT | 189 | #ifndef CONFIG_64BIT |
188 | if (MACHINE_HAS_IEEE) { | 190 | if (MACHINE_HAS_IEEE) { |
@@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) | |||
253 | struct _lowcore *lc = pcpu->lowcore; | 255 | struct _lowcore *lc = pcpu->lowcore; |
254 | struct thread_info *ti = task_thread_info(tsk); | 256 | struct thread_info *ti = task_thread_info(tsk); |
255 | 257 | ||
256 | lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; | 258 | lc->kernel_stack = (unsigned long) task_stack_page(tsk) |
259 | + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); | ||
257 | lc->thread_info = (unsigned long) task_thread_info(tsk); | 260 | lc->thread_info = (unsigned long) task_thread_info(tsk); |
258 | lc->current_task = (unsigned long) tsk; | 261 | lc->current_task = (unsigned long) tsk; |
259 | lc->user_timer = ti->user_timer; | 262 | lc->user_timer = ti->user_timer; |
@@ -809,8 +812,10 @@ void __init smp_prepare_boot_cpu(void) | |||
809 | pcpu->state = CPU_STATE_CONFIGURED; | 812 | pcpu->state = CPU_STATE_CONFIGURED; |
810 | pcpu->address = boot_cpu_address; | 813 | pcpu->address = boot_cpu_address; |
811 | pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); | 814 | pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); |
812 | pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; | 815 | pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE |
813 | pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; | 816 | + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); |
817 | pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE | ||
818 | + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); | ||
814 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | 819 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
815 | smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); | 820 | smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); |
816 | set_cpu_present(0, true); | 821 | set_cpu_present(0, true); |
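The setup.c and smp.c hunks above mirror the STACK_INIT change in entry.S/entry64.S: the stack pointers stored in the lowcore now already point below a reserved pt_regs area plus the standard frame overhead, which is why the various ahi/aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) adjustments could be dropped from the entry paths. A rough worked example of the arithmetic, using placeholder sizes (the real values come from the kernel headers at build time):

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define THREAD_ORDER		2			/* 64-bit case from thread_info.h above */
#define THREAD_SIZE		(PAGE_SIZE << THREAD_ORDER)
#define STACK_FRAME_OVERHEAD	160UL			/* placeholder value */
#define PT_SIZE			336UL			/* placeholder for sizeof(struct pt_regs) */

int main(void)
{
	unsigned long stack = 0x200000UL;		/* hypothetical stack allocation */
	unsigned long old_sp = stack + THREAD_SIZE;	/* what used to be stored */
	unsigned long new_sp = old_sp - STACK_FRAME_OVERHEAD - PT_SIZE;	/* STACK_INIT offset */

	/* the entry code finds room for pt_regs between new_sp and old_sp
	 * without having to adjust %r15 first */
	printf("old sp %#lx, new sp %#lx\n", old_sp, new_sp);
	return 0;
}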
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index aa1494d0e380..c479d2f9605b 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -41,6 +41,7 @@ struct page_key_data { | |||
41 | static struct page_key_data *page_key_data; | 41 | static struct page_key_data *page_key_data; |
42 | static struct page_key_data *page_key_rp, *page_key_wp; | 42 | static struct page_key_data *page_key_rp, *page_key_wp; |
43 | static unsigned long page_key_rx, page_key_wx; | 43 | static unsigned long page_key_rx, page_key_wx; |
44 | unsigned long suspend_zero_pages; | ||
44 | 45 | ||
45 | /* | 46 | /* |
46 | * For each page in the hibernation image one additional byte is | 47 | * For each page in the hibernation image one additional byte is |
@@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn) | |||
149 | return 0; | 150 | return 0; |
150 | } | 151 | } |
151 | 152 | ||
153 | /* | ||
154 | * PM notifier callback for suspend | ||
155 | */ | ||
156 | static int suspend_pm_cb(struct notifier_block *nb, unsigned long action, | ||
157 | void *ptr) | ||
158 | { | ||
159 | switch (action) { | ||
160 | case PM_SUSPEND_PREPARE: | ||
161 | case PM_HIBERNATION_PREPARE: | ||
162 | suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER); | ||
163 | if (!suspend_zero_pages) | ||
164 | return NOTIFY_BAD; | ||
165 | break; | ||
166 | case PM_POST_SUSPEND: | ||
167 | case PM_POST_HIBERNATION: | ||
168 | free_pages(suspend_zero_pages, LC_ORDER); | ||
169 | break; | ||
170 | default: | ||
171 | return NOTIFY_DONE; | ||
172 | } | ||
173 | return NOTIFY_OK; | ||
174 | } | ||
175 | |||
176 | static int __init suspend_pm_init(void) | ||
177 | { | ||
178 | pm_notifier(suspend_pm_cb, 0); | ||
179 | return 0; | ||
180 | } | ||
181 | arch_initcall(suspend_pm_init); | ||
182 | |||
152 | void save_processor_state(void) | 183 | void save_processor_state(void) |
153 | { | 184 | { |
154 | /* swsusp_arch_suspend() actually saves all cpu register contents. | 185 | /* swsusp_arch_suspend() actually saves all cpu register contents. |
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index d4ca4e0617b5..c487be4cfc81 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend) | |||
36 | /* Store prefix register on stack */ | 36 | /* Store prefix register on stack */ |
37 | stpx __SF_EMPTY(%r15) | 37 | stpx __SF_EMPTY(%r15) |
38 | 38 | ||
39 | /* Save prefix register contents for lowcore */ | 39 | /* Save prefix register contents for lowcore copy */ |
40 | llgf %r4,__SF_EMPTY(%r15) | 40 | llgf %r10,__SF_EMPTY(%r15) |
41 | 41 | ||
42 | /* Get pointer to save area */ | 42 | /* Get pointer to save area */ |
43 | lghi %r1,0x1000 | 43 | lghi %r1,0x1000 |
@@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend) | |||
91 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) | 91 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) |
92 | spx __SF_EMPTY(%r15) | 92 | spx __SF_EMPTY(%r15) |
93 | 93 | ||
94 | /* Save absolute zero pages */ | ||
95 | larl %r2,suspend_zero_pages | ||
96 | lg %r2,0(%r2) | ||
97 | lghi %r4,0 | ||
98 | lghi %r3,2*PAGE_SIZE | ||
99 | lghi %r5,2*PAGE_SIZE | ||
100 | 1: mvcle %r2,%r4,0 | ||
101 | jo 1b | ||
102 | |||
103 | /* Copy lowcore to absolute zero lowcore */ | ||
94 | lghi %r2,0 | 104 | lghi %r2,0 |
105 | lgr %r4,%r10 | ||
95 | lghi %r3,2*PAGE_SIZE | 106 | lghi %r3,2*PAGE_SIZE |
96 | lghi %r5,2*PAGE_SIZE | 107 | lghi %r5,2*PAGE_SIZE |
97 | 1: mvcle %r2,%r4,0 | 108 | 1: mvcle %r2,%r4,0 |
@@ -248,8 +259,20 @@ restore_registers: | |||
248 | /* Load old stack */ | 259 | /* Load old stack */ |
249 | lg %r15,0x2f8(%r13) | 260 | lg %r15,0x2f8(%r13) |
250 | 261 | ||
262 | /* Save prefix register */ | ||
263 | mvc __SF_EMPTY(4,%r15),0x318(%r13) | ||
264 | |||
265 | /* Restore absolute zero pages */ | ||
266 | lghi %r2,0 | ||
267 | larl %r4,suspend_zero_pages | ||
268 | lg %r4,0(%r4) | ||
269 | lghi %r3,2*PAGE_SIZE | ||
270 | lghi %r5,2*PAGE_SIZE | ||
271 | 1: mvcle %r2,%r4,0 | ||
272 | jo 1b | ||
273 | |||
251 | /* Restore prefix register */ | 274 | /* Restore prefix register */ |
252 | spx 0x318(%r13) | 275 | spx __SF_EMPTY(%r15) |
253 | 276 | ||
254 | /* Activate DAT */ | 277 | /* Activate DAT */ |
255 | stosm __SF_EMPTY(%r15),0x04 | 278 | stosm __SF_EMPTY(%r15),0x04 |
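The two new mvcle loops simply copy the first 2*PAGE_SIZE bytes of absolute storage: on suspend they are saved into the buffer that the suspend_pm_cb() notifier in suspend.c allocates, and on resume they are written back before the prefix register is restored. Expressed as a rough C sketch (illustrative only; the real work happens in the assembler path shown above, with DAT handled there):

/* suspend side: save the absolute zero pages into the pre-allocated buffer */
memcpy((void *) suspend_zero_pages, (void *) 0UL, 2 * PAGE_SIZE);

/* resume side: restore them before the prefix register is put back */
memcpy((void *) 0UL, (void *) suspend_zero_pages, 2 * PAGE_SIZE);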
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 13dd63fba367..c5762324d9ee 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -12,49 +12,16 @@ | |||
12 | * 'Traps.c' handles hardware traps and faults after we have saved some | 12 | * 'Traps.c' handles hardware traps and faults after we have saved some |
13 | * state in 'asm.s'. | 13 | * state in 'asm.s'. |
14 | */ | 14 | */ |
15 | #include <linux/sched.h> | 15 | #include <linux/kprobes.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kdebug.h> |
17 | #include <linux/string.h> | 17 | #include <linux/module.h> |
18 | #include <linux/errno.h> | ||
19 | #include <linux/ptrace.h> | 18 | #include <linux/ptrace.h> |
20 | #include <linux/timer.h> | 19 | #include <linux/sched.h> |
21 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
22 | #include <linux/smp.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/seq_file.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/kdebug.h> | ||
29 | #include <linux/kallsyms.h> | ||
30 | #include <linux/reboot.h> | ||
31 | #include <linux/kprobes.h> | ||
32 | #include <linux/bug.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <asm/uaccess.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <linux/atomic.h> | ||
37 | #include <asm/mathemu.h> | ||
38 | #include <asm/cpcmd.h> | ||
39 | #include <asm/lowcore.h> | ||
40 | #include <asm/debug.h> | ||
41 | #include <asm/ipl.h> | ||
42 | #include "entry.h" | 21 | #include "entry.h" |
43 | 22 | ||
44 | int show_unhandled_signals = 1; | 23 | int show_unhandled_signals = 1; |
45 | 24 | ||
46 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) | ||
47 | |||
48 | #ifndef CONFIG_64BIT | ||
49 | #define LONG "%08lx " | ||
50 | #define FOURLONG "%08lx %08lx %08lx %08lx\n" | ||
51 | static int kstack_depth_to_print = 12; | ||
52 | #else /* CONFIG_64BIT */ | ||
53 | #define LONG "%016lx " | ||
54 | #define FOURLONG "%016lx %016lx %016lx %016lx\n" | ||
55 | static int kstack_depth_to_print = 20; | ||
56 | #endif /* CONFIG_64BIT */ | ||
57 | |||
58 | static inline void __user *get_trap_ip(struct pt_regs *regs) | 25 | static inline void __user *get_trap_ip(struct pt_regs *regs) |
59 | { | 26 | { |
60 | #ifdef CONFIG_64BIT | 27 | #ifdef CONFIG_64BIT |
@@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs) | |||
72 | #endif | 39 | #endif |
73 | } | 40 | } |
74 | 41 | ||
75 | /* | ||
76 | * For show_trace we have tree different stack to consider: | ||
77 | * - the panic stack which is used if the kernel stack has overflown | ||
78 | * - the asynchronous interrupt stack (cpu related) | ||
79 | * - the synchronous kernel stack (process related) | ||
80 | * The stack trace can start at any of the three stack and can potentially | ||
81 | * touch all of them. The order is: panic stack, async stack, sync stack. | ||
82 | */ | ||
83 | static unsigned long | ||
84 | __show_trace(unsigned long sp, unsigned long low, unsigned long high) | ||
85 | { | ||
86 | struct stack_frame *sf; | ||
87 | struct pt_regs *regs; | ||
88 | |||
89 | while (1) { | ||
90 | sp = sp & PSW_ADDR_INSN; | ||
91 | if (sp < low || sp > high - sizeof(*sf)) | ||
92 | return sp; | ||
93 | sf = (struct stack_frame *) sp; | ||
94 | printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); | ||
95 | print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); | ||
96 | /* Follow the backchain. */ | ||
97 | while (1) { | ||
98 | low = sp; | ||
99 | sp = sf->back_chain & PSW_ADDR_INSN; | ||
100 | if (!sp) | ||
101 | break; | ||
102 | if (sp <= low || sp > high - sizeof(*sf)) | ||
103 | return sp; | ||
104 | sf = (struct stack_frame *) sp; | ||
105 | printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); | ||
106 | print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); | ||
107 | } | ||
108 | /* Zero backchain detected, check for interrupt frame. */ | ||
109 | sp = (unsigned long) (sf + 1); | ||
110 | if (sp <= low || sp > high - sizeof(*regs)) | ||
111 | return sp; | ||
112 | regs = (struct pt_regs *) sp; | ||
113 | printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); | ||
114 | print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); | ||
115 | low = sp; | ||
116 | sp = regs->gprs[15]; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static void show_trace(struct task_struct *task, unsigned long *stack) | ||
121 | { | ||
122 | register unsigned long __r15 asm ("15"); | ||
123 | unsigned long sp; | ||
124 | |||
125 | sp = (unsigned long) stack; | ||
126 | if (!sp) | ||
127 | sp = task ? task->thread.ksp : __r15; | ||
128 | printk("Call Trace:\n"); | ||
129 | #ifdef CONFIG_CHECK_STACK | ||
130 | sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, | ||
131 | S390_lowcore.panic_stack); | ||
132 | #endif | ||
133 | sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, | ||
134 | S390_lowcore.async_stack); | ||
135 | if (task) | ||
136 | __show_trace(sp, (unsigned long) task_stack_page(task), | ||
137 | (unsigned long) task_stack_page(task) + THREAD_SIZE); | ||
138 | else | ||
139 | __show_trace(sp, S390_lowcore.thread_info, | ||
140 | S390_lowcore.thread_info + THREAD_SIZE); | ||
141 | if (!task) | ||
142 | task = current; | ||
143 | debug_show_held_locks(task); | ||
144 | } | ||
145 | |||
146 | void show_stack(struct task_struct *task, unsigned long *sp) | ||
147 | { | ||
148 | register unsigned long * __r15 asm ("15"); | ||
149 | unsigned long *stack; | ||
150 | int i; | ||
151 | |||
152 | if (!sp) | ||
153 | stack = task ? (unsigned long *) task->thread.ksp : __r15; | ||
154 | else | ||
155 | stack = sp; | ||
156 | |||
157 | for (i = 0; i < kstack_depth_to_print; i++) { | ||
158 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) | ||
159 | break; | ||
160 | if ((i * sizeof(long) % 32) == 0) | ||
161 | printk("%s ", i == 0 ? "" : "\n"); | ||
162 | printk(LONG, *stack++); | ||
163 | } | ||
164 | printk("\n"); | ||
165 | show_trace(task, sp); | ||
166 | } | ||
167 | |||
168 | static void show_last_breaking_event(struct pt_regs *regs) | ||
169 | { | ||
170 | #ifdef CONFIG_64BIT | ||
171 | printk("Last Breaking-Event-Address:\n"); | ||
172 | printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); | ||
173 | print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); | ||
174 | #endif | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * The architecture-independent dump_stack generator | ||
179 | */ | ||
180 | void dump_stack(void) | ||
181 | { | ||
182 | printk("CPU: %d %s %s %.*s\n", | ||
183 | task_thread_info(current)->cpu, print_tainted(), | ||
184 | init_utsname()->release, | ||
185 | (int)strcspn(init_utsname()->version, " "), | ||
186 | init_utsname()->version); | ||
187 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | ||
188 | current->comm, current->pid, current, | ||
189 | (void *) current->thread.ksp); | ||
190 | show_stack(NULL, NULL); | ||
191 | } | ||
192 | EXPORT_SYMBOL(dump_stack); | ||
193 | |||
194 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) | ||
195 | { | ||
196 | return (regs->psw.mask & bits) / ((~bits + 1) & bits); | ||
197 | } | ||
198 | |||
199 | void show_registers(struct pt_regs *regs) | ||
200 | { | ||
201 | char *mode; | ||
202 | |||
203 | mode = user_mode(regs) ? "User" : "Krnl"; | ||
204 | printk("%s PSW : %p %p", | ||
205 | mode, (void *) regs->psw.mask, | ||
206 | (void *) regs->psw.addr); | ||
207 | print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); | ||
208 | printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " | ||
209 | "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), | ||
210 | mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), | ||
211 | mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), | ||
212 | mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), | ||
213 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), | ||
214 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); | ||
215 | #ifdef CONFIG_64BIT | ||
216 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); | ||
217 | #endif | ||
218 | printk("\n%s GPRS: " FOURLONG, mode, | ||
219 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | ||
220 | printk(" " FOURLONG, | ||
221 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); | ||
222 | printk(" " FOURLONG, | ||
223 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); | ||
224 | printk(" " FOURLONG, | ||
225 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); | ||
226 | |||
227 | show_code(regs); | ||
228 | } | ||
229 | |||
230 | void show_regs(struct pt_regs *regs) | ||
231 | { | ||
232 | printk("CPU: %d %s %s %.*s\n", | ||
233 | task_thread_info(current)->cpu, print_tainted(), | ||
234 | init_utsname()->release, | ||
235 | (int)strcspn(init_utsname()->version, " "), | ||
236 | init_utsname()->version); | ||
237 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | ||
238 | current->comm, current->pid, current, | ||
239 | (void *) current->thread.ksp); | ||
240 | show_registers(regs); | ||
241 | /* Show stack backtrace if pt_regs is from kernel mode */ | ||
242 | if (!user_mode(regs)) | ||
243 | show_trace(NULL, (unsigned long *) regs->gprs[15]); | ||
244 | show_last_breaking_event(regs); | ||
245 | } | ||
246 | |||
247 | static DEFINE_SPINLOCK(die_lock); | ||
248 | |||
249 | void die(struct pt_regs *regs, const char *str) | ||
250 | { | ||
251 | static int die_counter; | ||
252 | |||
253 | oops_enter(); | ||
254 | lgr_info_log(); | ||
255 | debug_stop_all(); | ||
256 | console_verbose(); | ||
257 | spin_lock_irq(&die_lock); | ||
258 | bust_spinlocks(1); | ||
259 | printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); | ||
260 | #ifdef CONFIG_PREEMPT | ||
261 | printk("PREEMPT "); | ||
262 | #endif | ||
263 | #ifdef CONFIG_SMP | ||
264 | printk("SMP "); | ||
265 | #endif | ||
266 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
267 | printk("DEBUG_PAGEALLOC"); | ||
268 | #endif | ||
269 | printk("\n"); | ||
270 | notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); | ||
271 | print_modules(); | ||
272 | show_regs(regs); | ||
273 | bust_spinlocks(0); | ||
274 | add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); | ||
275 | spin_unlock_irq(&die_lock); | ||
276 | if (in_interrupt()) | ||
277 | panic("Fatal exception in interrupt"); | ||
278 | if (panic_on_oops) | ||
279 | panic("Fatal exception: panic_on_oops"); | ||
280 | oops_exit(); | ||
281 | do_exit(SIGSEGV); | ||
282 | } | ||
283 | |||
284 | static inline void report_user_fault(struct pt_regs *regs, int signr) | 42 | static inline void report_user_fault(struct pt_regs *regs, int signr) |
285 | { | 43 | { |
286 | if ((task_pid_nr(current) > 1) && !show_unhandled_signals) | 44 | if ((task_pid_nr(current) > 1) && !show_unhandled_signals) |
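The removed mask_bits() helper extracts a PSW field without an explicit shift: dividing by ((~bits + 1) & bits) divides by the lowest set bit of the mask and so right-aligns the selected field. A minimal user-space sketch of the same idiom, using a made-up two-bit field rather than the real PSW mask constants:

#include <stdio.h>

/* Hypothetical two-bit field occupying bits 5..6 of a status word. */
#define FIELD_MASK 0x60UL

/* Same trick as the removed mask_bits(): (~bits + 1) & bits isolates the
 * lowest set bit of the mask, so dividing by it right-aligns the field. */
static unsigned long mask_bits(unsigned long word, unsigned long bits)
{
	return (word & bits) / ((~bits + 1) & bits);
}

int main(void)
{
	unsigned long word = 0x45UL;	/* (0x45 & 0x60) >> 5 == 2 */

	printf("field value: %lu\n", mask_bits(word, FIELD_MASK));
	return 0;
}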
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h index 2b29e62351d3..53252d2d4720 100644 --- a/arch/s390/kvm/trace.h +++ b/arch/s390/kvm/trace.h | |||
@@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction, | |||
117 | __entry->instruction, | 117 | __entry->instruction, |
118 | insn_to_mnemonic((unsigned char *) | 118 | insn_to_mnemonic((unsigned char *) |
119 | &__entry->instruction, | 119 | &__entry->instruction, |
120 | __entry->insn) ? | 120 | __entry->insn, sizeof(__entry->insn)) ? |
121 | "unknown" : __entry->insn) | 121 | "unknown" : __entry->insn) |
122 | ); | 122 | ); |
123 | 123 | ||
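The trace.h hunk tracks an insn_to_mnemonic() interface change that adds the destination buffer length, so a mnemonic can never overflow a caller's buffer. A rough sketch of that buffer-plus-size convention; the lookup table is faked and the names are placeholders, not the kernel's implementation:

#include <stdio.h>
#include <string.h>

/* Hypothetical lookup in the spirit of insn_to_mnemonic(): the caller now
 * passes the destination size, so a longer mnemonic can never overflow a
 * shrinking buffer. Returns 0 on success, non-zero for unknown opcodes. */
static int mnemonic_lookup(unsigned char opcode, char *buf, size_t len)
{
	const char *name = (opcode & 0x80) ? NULL : "example";	/* fake table */

	if (!name)
		return 1;
	snprintf(buf, len, "%s", name);	/* len bounds the copy */
	return 0;
}

int main(void)
{
	char insn[8];

	if (mnemonic_lookup(0x04, insn, sizeof(insn)))
		snprintf(insn, sizeof(insn), "unknown");
	printf("%s\n", insn);
	return 0;
}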
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 479e94282910..9d84a1feefef 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -458,12 +458,10 @@ static int __init cmm_init(void) | |||
458 | if (rc) | 458 | if (rc) |
459 | goto out_pm; | 459 | goto out_pm; |
460 | cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); | 460 | cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); |
461 | rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0; | 461 | if (!IS_ERR(cmm_thread_ptr)) |
462 | if (rc) | 462 | return 0; |
463 | goto out_kthread; | ||
464 | return 0; | ||
465 | 463 | ||
466 | out_kthread: | 464 | rc = PTR_ERR(cmm_thread_ptr); |
467 | unregister_pm_notifier(&cmm_power_notifier); | 465 | unregister_pm_notifier(&cmm_power_notifier); |
468 | out_pm: | 466 | out_pm: |
469 | unregister_oom_notifier(&cmm_oom_nb); | 467 | unregister_oom_notifier(&cmm_oom_nb); |
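The cmm.c hunk reworks the kthread_run() error handling around the error-pointer convention, where the failure code travels inside the returned pointer itself. A stand-alone sketch with simplified user-space stand-ins for ERR_PTR/IS_ERR/PTR_ERR (not the kernel macros):

#include <stdio.h>
#include <errno.h>

/* Simplified user-space stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR. */
static inline void *err_ptr(long err)		{ return (void *)err; }
static inline int is_err(const void *p)		{ return (unsigned long)p >= (unsigned long)-4095; }
static inline long ptr_err(const void *p)	{ return (long)p; }

/* Pretend thread starter: fails with -EAGAIN when no slot is available. */
static void *start_thread(int have_slot)
{
	static int dummy_thread;

	return have_slot ? (void *)&dummy_thread : err_ptr(-EAGAIN);
}

int main(void)
{
	void *thr = start_thread(0);

	if (!is_err(thr)) {
		printf("thread started\n");
		return 0;
	}
	/* Error path as in the hunk: derive rc from the pointer, then unwind. */
	printf("start failed, rc = %ld\n", ptr_err(thr));
	return 1;
}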
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 2fb9e63b8fc4..047c3e4c59a2 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs) | |||
395 | int fault; | 395 | int fault; |
396 | 396 | ||
397 | trans_exc_code = regs->int_parm_long; | 397 | trans_exc_code = regs->int_parm_long; |
398 | /* Protection exception is suppressing, decrement psw address. */ | 398 | /* |
399 | regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); | 399 | * Protection exceptions are suppressing, decrement psw address. |
400 | * The exceptions to this rule are aborted transactions, for these | ||
401 | * the PSW already points to the correct location. | ||
402 | */ | ||
403 | if (!(regs->int_code & 0x200)) | ||
404 | regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); | ||
400 | /* | 405 | /* |
401 | * Check for low-address protection. This needs to be treated | 406 | * Check for low-address protection. This needs to be treated |
402 | * as a special case because the translation exception code | 407 | * as a special case because the translation exception code |
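The do_protection_exception() change reads two pieces of regs->int_code: the upper halfword carries the instruction length that is handed to __rewind_psw(), and bit 0x200 marks the transaction-abort case in which the PSW must not be rewound. A toy sketch of that decoding, assuming made-up register values and treating the rewind as a plain subtraction:

#include <stdio.h>

/* Toy pt_regs holding only the fields the hunk touches. */
struct toy_regs {
	unsigned long psw_addr;
	unsigned long int_code;	/* instruction length in the upper halfword */
};

#define INT_CODE_TX_ABORT 0x200UL	/* stand-in for the 0x200 flag tested above */

static void rewind_psw_if_needed(struct toy_regs *regs)
{
	unsigned long ilen = regs->int_code >> 16;

	/* Suppressing exception: step the PSW back to the faulting insn,
	 * unless the fault came out of an aborted transaction. */
	if (!(regs->int_code & INT_CODE_TX_ABORT))
		regs->psw_addr -= ilen;
}

int main(void)
{
	struct toy_regs r = { .psw_addr = 0x1006, .int_code = (6UL << 16) | 0x04 };

	rewind_psw_if_needed(&r);
	printf("psw address after rewind: %#lx\n", r.psw_addr);	/* 0x1000 */
	return 0;
}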
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 532525ec88c1..121089d57802 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c | |||
@@ -39,7 +39,7 @@ int arch_prepare_hugepage(struct page *page) | |||
39 | if (!ptep) | 39 | if (!ptep) |
40 | return -ENOMEM; | 40 | return -ENOMEM; |
41 | 41 | ||
42 | pte = mk_pte(page, PAGE_RW); | 42 | pte_val(pte) = addr; |
43 | for (i = 0; i < PTRS_PER_PTE; i++) { | 43 | for (i = 0; i < PTRS_PER_PTE; i++) { |
44 | set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte); | 44 | set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte); |
45 | pte_val(pte) += PAGE_SIZE; | 45 | pte_val(pte) += PAGE_SIZE; |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 49ce6bb2c641..0b09b2342302 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -42,11 +42,10 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); | |||
42 | unsigned long empty_zero_page, zero_page_mask; | 42 | unsigned long empty_zero_page, zero_page_mask; |
43 | EXPORT_SYMBOL(empty_zero_page); | 43 | EXPORT_SYMBOL(empty_zero_page); |
44 | 44 | ||
45 | static unsigned long __init setup_zero_pages(void) | 45 | static void __init setup_zero_pages(void) |
46 | { | 46 | { |
47 | struct cpuid cpu_id; | 47 | struct cpuid cpu_id; |
48 | unsigned int order; | 48 | unsigned int order; |
49 | unsigned long size; | ||
50 | struct page *page; | 49 | struct page *page; |
51 | int i; | 50 | int i; |
52 | 51 | ||
@@ -63,10 +62,18 @@ static unsigned long __init setup_zero_pages(void) | |||
63 | break; | 62 | break; |
64 | case 0x2097: /* z10 */ | 63 | case 0x2097: /* z10 */ |
65 | case 0x2098: /* z10 */ | 64 | case 0x2098: /* z10 */ |
66 | default: | 65 | case 0x2817: /* z196 */ |
66 | case 0x2818: /* z196 */ | ||
67 | order = 2; | 67 | order = 2; |
68 | break; | 68 | break; |
69 | case 0x2827: /* zEC12 */ | ||
70 | default: | ||
71 | order = 5; | ||
72 | break; | ||
69 | } | 73 | } |
74 | /* Limit number of empty zero pages for small memory sizes */ | ||
75 | if (order > 2 && totalram_pages <= 16384) | ||
76 | order = 2; | ||
70 | 77 | ||
71 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | 78 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
72 | if (!empty_zero_page) | 79 | if (!empty_zero_page) |
@@ -75,14 +82,11 @@ static unsigned long __init setup_zero_pages(void) | |||
75 | page = virt_to_page((void *) empty_zero_page); | 82 | page = virt_to_page((void *) empty_zero_page); |
76 | split_page(page, order); | 83 | split_page(page, order); |
77 | for (i = 1 << order; i > 0; i--) { | 84 | for (i = 1 << order; i > 0; i--) { |
78 | SetPageReserved(page); | 85 | mark_page_reserved(page); |
79 | page++; | 86 | page++; |
80 | } | 87 | } |
81 | 88 | ||
82 | size = PAGE_SIZE << order; | 89 | zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; |
83 | zero_page_mask = (size - 1) & PAGE_MASK; | ||
84 | |||
85 | return 1UL << order; | ||
86 | } | 90 | } |
87 | 91 | ||
88 | /* | 92 | /* |
@@ -139,7 +143,7 @@ void __init mem_init(void) | |||
139 | 143 | ||
140 | /* this will put all low memory onto the freelists */ | 144 | /* this will put all low memory onto the freelists */ |
141 | totalram_pages += free_all_bootmem(); | 145 | totalram_pages += free_all_bootmem(); |
142 | totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ | 146 | setup_zero_pages(); /* Setup zeroed pages. */ |
143 | 147 | ||
144 | reservedpages = 0; | 148 | reservedpages = 0; |
145 | 149 | ||
@@ -158,34 +162,15 @@ void __init mem_init(void) | |||
158 | PFN_ALIGN((unsigned long)&_eshared) - 1); | 162 | PFN_ALIGN((unsigned long)&_eshared) - 1); |
159 | } | 163 | } |
160 | 164 | ||
161 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | ||
162 | { | ||
163 | unsigned long addr = begin; | ||
164 | |||
165 | if (begin >= end) | ||
166 | return; | ||
167 | for (; addr < end; addr += PAGE_SIZE) { | ||
168 | ClearPageReserved(virt_to_page(addr)); | ||
169 | init_page_count(virt_to_page(addr)); | ||
170 | memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM, | ||
171 | PAGE_SIZE); | ||
172 | free_page(addr); | ||
173 | totalram_pages++; | ||
174 | } | ||
175 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); | ||
176 | } | ||
177 | |||
178 | void free_initmem(void) | 165 | void free_initmem(void) |
179 | { | 166 | { |
180 | free_init_pages("unused kernel memory", | 167 | free_initmem_default(0); |
181 | (unsigned long)&__init_begin, | ||
182 | (unsigned long)&__init_end); | ||
183 | } | 168 | } |
184 | 169 | ||
185 | #ifdef CONFIG_BLK_DEV_INITRD | 170 | #ifdef CONFIG_BLK_DEV_INITRD |
186 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 171 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
187 | { | 172 | { |
188 | free_init_pages("initrd memory", start, end); | 173 | free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); |
189 | } | 174 | } |
190 | #endif | 175 | #endif |
191 | 176 | ||
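setup_zero_pages() now picks the zero-page order by machine type and caps it on small systems; the resulting zero_page_mask lets the zero-page lookup spread read-only faults over 2^order page-sized copies keyed by the faulting address. A rough user-space sketch of how such a mask selects a copy, with illustrative sizes and addresses:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int order = 5;			/* e.g. 32 zero pages on newer machines */
	unsigned long base = 0x1000000UL;	/* pretend __get_free_pages() result */
	unsigned long mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
	unsigned long vaddr;

	/* Consecutive user addresses hit different zero-page copies, which
	 * spreads cache pressure across the whole block of zero pages. */
	for (vaddr = 0x10000000UL; vaddr < 0x10000000UL + 4 * PAGE_SIZE;
	     vaddr += PAGE_SIZE)
		printf("vaddr %#lx -> zero page copy at %#lx\n",
		       vaddr, base + (vaddr & mask));
	return 0;
}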
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index d21040ed5e59..80adfbf75065 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c | |||
@@ -9,31 +9,25 @@ | |||
9 | #include <asm/pgtable.h> | 9 | #include <asm/pgtable.h> |
10 | #include <asm/page.h> | 10 | #include <asm/page.h> |
11 | 11 | ||
12 | static inline unsigned long sske_frame(unsigned long addr, unsigned char skey) | ||
13 | { | ||
14 | asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0" | ||
15 | : [addr] "+a" (addr) : [skey] "d" (skey)); | ||
16 | return addr; | ||
17 | } | ||
18 | |||
12 | void storage_key_init_range(unsigned long start, unsigned long end) | 19 | void storage_key_init_range(unsigned long start, unsigned long end) |
13 | { | 20 | { |
14 | unsigned long boundary, function, size; | 21 | unsigned long boundary, size; |
15 | 22 | ||
16 | while (start < end) { | 23 | while (start < end) { |
17 | if (MACHINE_HAS_EDAT2) { | ||
18 | /* set storage keys for a 2GB frame */ | ||
19 | function = 0x22000 | PAGE_DEFAULT_KEY; | ||
20 | size = 1UL << 31; | ||
21 | boundary = (start + size) & ~(size - 1); | ||
22 | if (boundary <= end) { | ||
23 | do { | ||
24 | start = pfmf(function, start); | ||
25 | } while (start < boundary); | ||
26 | continue; | ||
27 | } | ||
28 | } | ||
29 | if (MACHINE_HAS_EDAT1) { | 24 | if (MACHINE_HAS_EDAT1) { |
30 | /* set storage keys for a 1MB frame */ | 25 | /* set storage keys for a 1MB frame */ |
31 | function = 0x21000 | PAGE_DEFAULT_KEY; | ||
32 | size = 1UL << 20; | 26 | size = 1UL << 20; |
33 | boundary = (start + size) & ~(size - 1); | 27 | boundary = (start + size) & ~(size - 1); |
34 | if (boundary <= end) { | 28 | if (boundary <= end) { |
35 | do { | 29 | do { |
36 | start = pfmf(function, start); | 30 | start = sske_frame(start, PAGE_DEFAULT_KEY); |
37 | } while (start < boundary); | 31 | } while (start < boundary); |
38 | continue; | 32 | continue; |
39 | } | 33 | } |
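storage_key_init_range() keeps the pattern of rounding the current address up to the next 1 MB boundary and taking the frame-sized fast path only when that boundary still lies inside the range. A small sketch of the boundary arithmetic, independent of the sske/pfmf instructions themselves and using invented addresses:

#include <stdio.h>

#define FRAME_SIZE (1UL << 20)	/* 1 MB frame as in the EDAT1 path */
#define SMALL_PAGE 4096UL

int main(void)
{
	unsigned long start = 0x00150000UL;	/* not 1 MB aligned on purpose */
	unsigned long end   = 0x00204000UL;

	while (start < end) {
		/* Next 1 MB boundary strictly above start. */
		unsigned long boundary = (start + FRAME_SIZE) & ~(FRAME_SIZE - 1);

		if (boundary <= end) {
			/* Fast path: the real code sets the keys frame by
			 * frame (sske/pfmf) until it reaches the boundary. */
			printf("frames  %#lx -> %#lx\n", start, boundary);
			start = boundary;
		} else {
			/* Tail smaller than a frame: fall back to 4 KB steps. */
			printf("page    %#lx\n", start);
			start += SMALL_PAGE;
		}
	}
	return 0;
}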
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index ae44d2a34313..bd954e96f51c 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -379,75 +379,183 @@ out_unmap: | |||
379 | } | 379 | } |
380 | EXPORT_SYMBOL_GPL(gmap_map_segment); | 380 | EXPORT_SYMBOL_GPL(gmap_map_segment); |
381 | 381 | ||
382 | /* | 382 | static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap) |
383 | * this function is assumed to be called with mmap_sem held | ||
384 | */ | ||
385 | unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) | ||
386 | { | 383 | { |
387 | unsigned long *table, vmaddr, segment; | 384 | unsigned long *table; |
388 | struct mm_struct *mm; | ||
389 | struct gmap_pgtable *mp; | ||
390 | struct gmap_rmap *rmap; | ||
391 | struct vm_area_struct *vma; | ||
392 | struct page *page; | ||
393 | pgd_t *pgd; | ||
394 | pud_t *pud; | ||
395 | pmd_t *pmd; | ||
396 | 385 | ||
397 | current->thread.gmap_addr = address; | ||
398 | mm = gmap->mm; | ||
399 | /* Walk the gmap address space page table */ | ||
400 | table = gmap->table + ((address >> 53) & 0x7ff); | 386 | table = gmap->table + ((address >> 53) & 0x7ff); |
401 | if (unlikely(*table & _REGION_ENTRY_INV)) | 387 | if (unlikely(*table & _REGION_ENTRY_INV)) |
402 | return -EFAULT; | 388 | return ERR_PTR(-EFAULT); |
403 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 389 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
404 | table = table + ((address >> 42) & 0x7ff); | 390 | table = table + ((address >> 42) & 0x7ff); |
405 | if (unlikely(*table & _REGION_ENTRY_INV)) | 391 | if (unlikely(*table & _REGION_ENTRY_INV)) |
406 | return -EFAULT; | 392 | return ERR_PTR(-EFAULT); |
407 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 393 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
408 | table = table + ((address >> 31) & 0x7ff); | 394 | table = table + ((address >> 31) & 0x7ff); |
409 | if (unlikely(*table & _REGION_ENTRY_INV)) | 395 | if (unlikely(*table & _REGION_ENTRY_INV)) |
410 | return -EFAULT; | 396 | return ERR_PTR(-EFAULT); |
411 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 397 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
412 | table = table + ((address >> 20) & 0x7ff); | 398 | table = table + ((address >> 20) & 0x7ff); |
399 | return table; | ||
400 | } | ||
401 | |||
402 | /** | ||
403 | * __gmap_translate - translate a guest address to a user space address | ||
404 | * @address: guest address | ||
405 | * @gmap: pointer to guest mapping meta data structure | ||
406 | * | ||
407 | * Returns user space address which corresponds to the guest address or | ||
408 | * -EFAULT if no such mapping exists. | ||
409 | * This function does not establish potentially missing page table entries. | ||
410 | * The mmap_sem of the mm that belongs to the address space must be held | ||
411 | * when this function gets called. | ||
412 | */ | ||
413 | unsigned long __gmap_translate(unsigned long address, struct gmap *gmap) | ||
414 | { | ||
415 | unsigned long *segment_ptr, vmaddr, segment; | ||
416 | struct gmap_pgtable *mp; | ||
417 | struct page *page; | ||
413 | 418 | ||
419 | current->thread.gmap_addr = address; | ||
420 | segment_ptr = gmap_table_walk(address, gmap); | ||
421 | if (IS_ERR(segment_ptr)) | ||
422 | return PTR_ERR(segment_ptr); | ||
414 | /* Convert the gmap address to an mm address. */ | 423 | /* Convert the gmap address to an mm address. */ |
415 | segment = *table; | 424 | segment = *segment_ptr; |
416 | if (likely(!(segment & _SEGMENT_ENTRY_INV))) { | 425 | if (!(segment & _SEGMENT_ENTRY_INV)) { |
417 | page = pfn_to_page(segment >> PAGE_SHIFT); | 426 | page = pfn_to_page(segment >> PAGE_SHIFT); |
418 | mp = (struct gmap_pgtable *) page->index; | 427 | mp = (struct gmap_pgtable *) page->index; |
419 | return mp->vmaddr | (address & ~PMD_MASK); | 428 | return mp->vmaddr | (address & ~PMD_MASK); |
420 | } else if (segment & _SEGMENT_ENTRY_RO) { | 429 | } else if (segment & _SEGMENT_ENTRY_RO) { |
421 | vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; | 430 | vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; |
422 | vma = find_vma(mm, vmaddr); | 431 | return vmaddr | (address & ~PMD_MASK); |
423 | if (!vma || vma->vm_start > vmaddr) | 432 | } |
424 | return -EFAULT; | 433 | return -EFAULT; |
425 | 434 | } | |
426 | /* Walk the parent mm page table */ | 435 | EXPORT_SYMBOL_GPL(__gmap_translate); |
427 | pgd = pgd_offset(mm, vmaddr); | 436 | |
428 | pud = pud_alloc(mm, pgd, vmaddr); | 437 | /** |
429 | if (!pud) | 438 | * gmap_translate - translate a guest address to a user space address |
430 | return -ENOMEM; | 439 | * @address: guest address |
431 | pmd = pmd_alloc(mm, pud, vmaddr); | 440 | * @gmap: pointer to guest mapping meta data structure |
432 | if (!pmd) | 441 | * |
433 | return -ENOMEM; | 442 | * Returns user space address which corresponds to the guest address or |
434 | if (!pmd_present(*pmd) && | 443 | * -EFAULT if no such mapping exists. |
435 | __pte_alloc(mm, vma, pmd, vmaddr)) | 444 | * This function does not establish potentially missing page table entries. |
436 | return -ENOMEM; | 445 | */ |
437 | /* pmd now points to a valid segment table entry. */ | 446 | unsigned long gmap_translate(unsigned long address, struct gmap *gmap) |
438 | rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); | 447 | { |
439 | if (!rmap) | 448 | unsigned long rc; |
440 | return -ENOMEM; | 449 | |
441 | /* Link gmap segment table entry location to page table. */ | 450 | down_read(&gmap->mm->mmap_sem); |
442 | page = pmd_page(*pmd); | 451 | rc = __gmap_translate(address, gmap); |
443 | mp = (struct gmap_pgtable *) page->index; | 452 | up_read(&gmap->mm->mmap_sem); |
444 | rmap->entry = table; | 453 | return rc; |
445 | spin_lock(&mm->page_table_lock); | 454 | } |
455 | EXPORT_SYMBOL_GPL(gmap_translate); | ||
456 | |||
457 | static int gmap_connect_pgtable(unsigned long segment, | ||
458 | unsigned long *segment_ptr, | ||
459 | struct gmap *gmap) | ||
460 | { | ||
461 | unsigned long vmaddr; | ||
462 | struct vm_area_struct *vma; | ||
463 | struct gmap_pgtable *mp; | ||
464 | struct gmap_rmap *rmap; | ||
465 | struct mm_struct *mm; | ||
466 | struct page *page; | ||
467 | pgd_t *pgd; | ||
468 | pud_t *pud; | ||
469 | pmd_t *pmd; | ||
470 | |||
471 | mm = gmap->mm; | ||
472 | vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; | ||
473 | vma = find_vma(mm, vmaddr); | ||
474 | if (!vma || vma->vm_start > vmaddr) | ||
475 | return -EFAULT; | ||
476 | /* Walk the parent mm page table */ | ||
477 | pgd = pgd_offset(mm, vmaddr); | ||
478 | pud = pud_alloc(mm, pgd, vmaddr); | ||
479 | if (!pud) | ||
480 | return -ENOMEM; | ||
481 | pmd = pmd_alloc(mm, pud, vmaddr); | ||
482 | if (!pmd) | ||
483 | return -ENOMEM; | ||
484 | if (!pmd_present(*pmd) && | ||
485 | __pte_alloc(mm, vma, pmd, vmaddr)) | ||
486 | return -ENOMEM; | ||
487 | /* pmd now points to a valid segment table entry. */ | ||
488 | rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); | ||
489 | if (!rmap) | ||
490 | return -ENOMEM; | ||
491 | /* Link gmap segment table entry location to page table. */ | ||
492 | page = pmd_page(*pmd); | ||
493 | mp = (struct gmap_pgtable *) page->index; | ||
494 | rmap->entry = segment_ptr; | ||
495 | spin_lock(&mm->page_table_lock); | ||
496 | if (*segment_ptr == segment) { | ||
446 | list_add(&rmap->list, &mp->mapper); | 497 | list_add(&rmap->list, &mp->mapper); |
447 | spin_unlock(&mm->page_table_lock); | ||
448 | /* Set gmap segment table entry to page table. */ | 498 | /* Set gmap segment table entry to page table. */ |
449 | *table = pmd_val(*pmd) & PAGE_MASK; | 499 | *segment_ptr = pmd_val(*pmd) & PAGE_MASK; |
450 | return vmaddr | (address & ~PMD_MASK); | 500 | rmap = NULL; |
501 | } | ||
502 | spin_unlock(&mm->page_table_lock); | ||
503 | kfree(rmap); | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table) | ||
508 | { | ||
509 | struct gmap_rmap *rmap, *next; | ||
510 | struct gmap_pgtable *mp; | ||
511 | struct page *page; | ||
512 | int flush; | ||
513 | |||
514 | flush = 0; | ||
515 | spin_lock(&mm->page_table_lock); | ||
516 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | ||
517 | mp = (struct gmap_pgtable *) page->index; | ||
518 | list_for_each_entry_safe(rmap, next, &mp->mapper, list) { | ||
519 | *rmap->entry = | ||
520 | _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; | ||
521 | list_del(&rmap->list); | ||
522 | kfree(rmap); | ||
523 | flush = 1; | ||
524 | } | ||
525 | spin_unlock(&mm->page_table_lock); | ||
526 | if (flush) | ||
527 | __tlb_flush_global(); | ||
528 | } | ||
529 | |||
530 | /* | ||
531 | * this function is assumed to be called with mmap_sem held | ||
532 | */ | ||
533 | unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) | ||
534 | { | ||
535 | unsigned long *segment_ptr, segment; | ||
536 | struct gmap_pgtable *mp; | ||
537 | struct page *page; | ||
538 | int rc; | ||
539 | |||
540 | current->thread.gmap_addr = address; | ||
541 | segment_ptr = gmap_table_walk(address, gmap); | ||
542 | if (IS_ERR(segment_ptr)) | ||
543 | return -EFAULT; | ||
544 | /* Convert the gmap address to an mm address. */ | ||
545 | while (1) { | ||
546 | segment = *segment_ptr; | ||
547 | if (!(segment & _SEGMENT_ENTRY_INV)) { | ||
548 | /* Page table is present */ | ||
549 | page = pfn_to_page(segment >> PAGE_SHIFT); | ||
550 | mp = (struct gmap_pgtable *) page->index; | ||
551 | return mp->vmaddr | (address & ~PMD_MASK); | ||
552 | } | ||
553 | if (!(segment & _SEGMENT_ENTRY_RO)) | ||
554 | /* Nothing mapped in the gmap address space. */ | ||
555 | break; | ||
556 | rc = gmap_connect_pgtable(segment, segment_ptr, gmap); | ||
557 | if (rc) | ||
558 | return rc; | ||
451 | } | 559 | } |
452 | return -EFAULT; | 560 | return -EFAULT; |
453 | } | 561 | } |
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) | |||
511 | } | 619 | } |
512 | EXPORT_SYMBOL_GPL(gmap_discard); | 620 | EXPORT_SYMBOL_GPL(gmap_discard); |
513 | 621 | ||
514 | void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) | ||
515 | { | ||
516 | struct gmap_rmap *rmap, *next; | ||
517 | struct gmap_pgtable *mp; | ||
518 | struct page *page; | ||
519 | int flush; | ||
520 | |||
521 | flush = 0; | ||
522 | spin_lock(&mm->page_table_lock); | ||
523 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | ||
524 | mp = (struct gmap_pgtable *) page->index; | ||
525 | list_for_each_entry_safe(rmap, next, &mp->mapper, list) { | ||
526 | *rmap->entry = | ||
527 | _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; | ||
528 | list_del(&rmap->list); | ||
529 | kfree(rmap); | ||
530 | flush = 1; | ||
531 | } | ||
532 | spin_unlock(&mm->page_table_lock); | ||
533 | if (flush) | ||
534 | __tlb_flush_global(); | ||
535 | } | ||
536 | |||
537 | static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | 622 | static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, |
538 | unsigned long vmaddr) | 623 | unsigned long vmaddr) |
539 | { | 624 | { |
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table) | |||
586 | { | 671 | { |
587 | } | 672 | } |
588 | 673 | ||
589 | static inline void gmap_unmap_notifier(struct mm_struct *mm, | 674 | static inline void gmap_disconnect_pgtable(struct mm_struct *mm, |
590 | unsigned long *table) | 675 | unsigned long *table) |
591 | { | 676 | { |
592 | } | 677 | } |
593 | 678 | ||
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
653 | unsigned int bit, mask; | 738 | unsigned int bit, mask; |
654 | 739 | ||
655 | if (mm_has_pgste(mm)) { | 740 | if (mm_has_pgste(mm)) { |
656 | gmap_unmap_notifier(mm, table); | 741 | gmap_disconnect_pgtable(mm, table); |
657 | return page_table_free_pgste(table); | 742 | return page_table_free_pgste(table); |
658 | } | 743 | } |
659 | /* Free 1K/2K page table fragment of a 4K page */ | 744 | /* Free 1K/2K page table fragment of a 4K page */ |
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) | |||
696 | 781 | ||
697 | mm = tlb->mm; | 782 | mm = tlb->mm; |
698 | if (mm_has_pgste(mm)) { | 783 | if (mm_has_pgste(mm)) { |
699 | gmap_unmap_notifier(mm, table); | 784 | gmap_disconnect_pgtable(mm, table); |
700 | table = (unsigned long *) (__pa(table) | FRAG_MASK); | 785 | table = (unsigned long *) (__pa(table) | FRAG_MASK); |
701 | tlb_remove_table(tlb, table); | 786 | tlb_remove_table(tlb, table); |
702 | return; | 787 | return; |
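The new gmap_table_walk() factors out a radix walk through the guest address-space tables: each level is indexed by an 11-bit slice of the address and the entry, unless marked invalid, points at the next table. A self-contained sketch of the same walking style over a tiny two-level software table; the widths, masks and values are illustrative, not the real region-table format:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Toy two-level table: 3 address bits per level, 4-bit page offset. */
#define LEVEL_BITS	3
#define LEVEL_MASK	((1UL << LEVEL_BITS) - 1)
#define PAGE_BITS	4
#define ENTRY_INVALID	1UL	/* low bit marks an empty slot, like _REGION_ENTRY_INV */

static uint64_t *walk(uint64_t *top, unsigned long addr)
{
	uint64_t *table = top;

	/* Level 1: an address slice selects the entry. */
	table += (addr >> (PAGE_BITS + LEVEL_BITS)) & LEVEL_MASK;
	if (*table & ENTRY_INVALID)
		return NULL;
	/* Follow the origin stored in the entry to the next table. */
	table = (uint64_t *)(uintptr_t)(*table & ~ENTRY_INVALID);
	/* Level 2: the next slice selects the final entry, returned as-is. */
	return table + ((addr >> PAGE_BITS) & LEVEL_MASK);
}

int main(void)
{
	uint64_t *top = calloc(1UL << LEVEL_BITS, sizeof(*top));
	uint64_t *second = calloc(1UL << LEVEL_BITS, sizeof(*second));
	unsigned long addr = 0x96;	/* level-1 index 1, level-2 index 1 */
	uint64_t *entry;
	unsigned int i;

	if (!top || !second)
		return 1;
	for (i = 0; i < (1U << LEVEL_BITS); i++)
		top[i] = second[i] = ENTRY_INVALID;
	second[1] = 0xabcd0000;			/* pretend leaf entry */
	top[1] = (uint64_t)(uintptr_t)second;	/* link level 1 to level 2 */

	entry = walk(top, addr);
	if (entry)
		printf("entry for %#lx: %#llx\n", addr,
		       (unsigned long long)*entry);
	free(second);
	free(top);
	return 0;
}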
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index ffab84db6907..35837054f734 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size) | |||
191 | /* | 191 | /* |
192 | * Add a backed mem_map array to the virtual mem_map array. | 192 | * Add a backed mem_map array to the virtual mem_map array. |
193 | */ | 193 | */ |
194 | int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) | 194 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) |
195 | { | 195 | { |
196 | unsigned long address, start_addr, end_addr; | 196 | unsigned long address = start; |
197 | pgd_t *pg_dir; | 197 | pgd_t *pg_dir; |
198 | pud_t *pu_dir; | 198 | pud_t *pu_dir; |
199 | pmd_t *pm_dir; | 199 | pmd_t *pm_dir; |
200 | pte_t *pt_dir; | 200 | pte_t *pt_dir; |
201 | int ret = -ENOMEM; | 201 | int ret = -ENOMEM; |
202 | 202 | ||
203 | start_addr = (unsigned long) start; | 203 | for (address = start; address < end;) { |
204 | end_addr = (unsigned long) (start + nr); | ||
205 | |||
206 | for (address = start_addr; address < end_addr;) { | ||
207 | pg_dir = pgd_offset_k(address); | 204 | pg_dir = pgd_offset_k(address); |
208 | if (pgd_none(*pg_dir)) { | 205 | if (pgd_none(*pg_dir)) { |
209 | pu_dir = vmem_pud_alloc(); | 206 | pu_dir = vmem_pud_alloc(); |
@@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) | |||
262 | } | 259 | } |
263 | address += PAGE_SIZE; | 260 | address += PAGE_SIZE; |
264 | } | 261 | } |
265 | memset(start, 0, nr * sizeof(struct page)); | 262 | memset((void *)start, 0, end - start); |
266 | ret = 0; | 263 | ret = 0; |
267 | out: | 264 | out: |
268 | flush_tlb_kernel_range(start_addr, end_addr); | 265 | flush_tlb_kernel_range(start, end); |
269 | return ret; | 266 | return ret; |
270 | } | 267 | } |
271 | 268 | ||
272 | void vmemmap_free(struct page *memmap, unsigned long nr_pages) | 269 | void vmemmap_free(unsigned long start, unsigned long end) |
273 | { | 270 | { |
274 | } | 271 | } |
275 | 272 | ||
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 0972e91cced2..82f165f8078c 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
@@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp) | |||
747 | 747 | ||
748 | if (!bpf_jit_enable) | 748 | if (!bpf_jit_enable) |
749 | return; | 749 | return; |
750 | addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL); | 750 | addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL); |
751 | if (addrs == NULL) | 751 | if (addrs == NULL) |
752 | return; | 752 | return; |
753 | memset(addrs, 0, fp->len * sizeof(*addrs)); | ||
754 | memset(&jit, 0, sizeof(cjit)); | 753 | memset(&jit, 0, sizeof(cjit)); |
755 | memset(&cjit, 0, sizeof(cjit)); | 754 | memset(&cjit, 0, sizeof(cjit)); |
756 | 755 | ||
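The bpf_jit_comp.c change swaps kmalloc() plus memset() for kcalloc(), which zeroes the array and also rejects multiplications that would overflow. A user-space approximation of that guard; calloc() already performs the same check, so the helper is purely illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Rough stand-in for kcalloc(): zeroed array allocation with overflow check. */
static void *xcalloc(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would wrap around */
	return calloc(n, size);		/* calloc() zeroes the memory */
}

int main(void)
{
	unsigned int *addrs = xcalloc(64, sizeof(*addrs));

	if (!addrs)
		return 1;
	printf("addrs[0] = %u (already zeroed)\n", addrs[0]);
	free(addrs);
	return 0;
}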
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 584b93674ea4..ffeb17ce7f31 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -440,6 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) | |||
440 | switch (id.machine) { | 440 | switch (id.machine) { |
441 | case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; | 441 | case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; |
442 | case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; | 442 | case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; |
443 | case 0x2827: ops->cpu_type = "s390/zEC12"; break; | ||
443 | default: return -ENODEV; | 444 | default: return -ENODEV; |
444 | } | 445 | } |
445 | } | 446 | } |
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index f0f426a113ce..086a2e37935d 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile | |||
@@ -2,5 +2,5 @@ | |||
2 | # Makefile for the s390 PCI subsystem. | 2 | # Makefile for the s390 PCI subsystem. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \ | 5 | obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \ |
6 | pci_sysfs.o pci_event.o pci_debug.o | 6 | pci_event.o pci_debug.o pci_insn.o |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 27b4c17855b9..e6f15b5d8b7d 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -99,9 +99,6 @@ static int __read_mostly aisb_max; | |||
99 | static struct kmem_cache *zdev_irq_cache; | 99 | static struct kmem_cache *zdev_irq_cache; |
100 | static struct kmem_cache *zdev_fmb_cache; | 100 | static struct kmem_cache *zdev_fmb_cache; |
101 | 101 | ||
102 | debug_info_t *pci_debug_msg_id; | ||
103 | debug_info_t *pci_debug_err_id; | ||
104 | |||
105 | static inline int irq_to_msi_nr(unsigned int irq) | 102 | static inline int irq_to_msi_nr(unsigned int irq) |
106 | { | 103 | { |
107 | return irq & ZPCI_MSI_MASK; | 104 | return irq & ZPCI_MSI_MASK; |
@@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, | |||
179 | fib->aisb = (u64) bucket->aisb + aisb / 8; | 176 | fib->aisb = (u64) bucket->aisb + aisb / 8; |
180 | fib->aisbo = aisb & ZPCI_MSI_MASK; | 177 | fib->aisbo = aisb & ZPCI_MSI_MASK; |
181 | 178 | ||
182 | rc = mpcifc_instr(req, fib); | 179 | rc = s390pci_mod_fc(req, fib); |
183 | pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); | 180 | pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); |
184 | 181 | ||
185 | free_page((unsigned long) fib); | 182 | free_page((unsigned long) fib); |
@@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args | |||
209 | fib->iota = args->iota; | 206 | fib->iota = args->iota; |
210 | fib->fmb_addr = args->fmb_addr; | 207 | fib->fmb_addr = args->fmb_addr; |
211 | 208 | ||
212 | rc = mpcifc_instr(req, fib); | 209 | rc = s390pci_mod_fc(req, fib); |
213 | free_page((unsigned long) fib); | 210 | free_page((unsigned long) fib); |
214 | return rc; | 211 | return rc; |
215 | } | 212 | } |
@@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev) | |||
249 | if (zdev->fmb) | 246 | if (zdev->fmb) |
250 | return -EINVAL; | 247 | return -EINVAL; |
251 | 248 | ||
252 | zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL); | 249 | zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL); |
253 | if (!zdev->fmb) | 250 | if (!zdev->fmb) |
254 | return -ENOMEM; | 251 | return -ENOMEM; |
255 | memset(zdev->fmb, 0, sizeof(*zdev->fmb)); | ||
256 | WARN_ON((u64) zdev->fmb & 0xf); | 252 | WARN_ON((u64) zdev->fmb & 0xf); |
257 | 253 | ||
258 | args.fmb_addr = virt_to_phys(zdev->fmb); | 254 | args.fmb_addr = virt_to_phys(zdev->fmb); |
@@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len) | |||
284 | u64 data; | 280 | u64 data; |
285 | int rc; | 281 | int rc; |
286 | 282 | ||
287 | rc = pcilg_instr(&data, req, offset); | 283 | rc = s390pci_load(&data, req, offset); |
288 | data = data << ((8 - len) * 8); | 284 | if (!rc) { |
289 | data = le64_to_cpu(data); | 285 | data = data << ((8 - len) * 8); |
290 | if (!rc) | 286 | data = le64_to_cpu(data); |
291 | *val = (u32) data; | 287 | *val = (u32) data; |
292 | else | 288 | } else |
293 | *val = 0xffffffff; | 289 | *val = 0xffffffff; |
294 | return rc; | 290 | return rc; |
295 | } | 291 | } |
@@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) | |||
302 | 298 | ||
303 | data = cpu_to_le64(data); | 299 | data = cpu_to_le64(data); |
304 | data = data >> ((8 - len) * 8); | 300 | data = data >> ((8 - len) * 8); |
305 | rc = pcistg_instr(data, req, offset); | 301 | rc = s390pci_store(data, req, offset); |
306 | return rc; | 302 | return rc; |
307 | } | 303 | } |
308 | 304 | ||
@@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, | |||
409 | int size, u32 *val) | 405 | int size, u32 *val) |
410 | { | 406 | { |
411 | struct zpci_dev *zdev = get_zdev_by_bus(bus); | 407 | struct zpci_dev *zdev = get_zdev_by_bus(bus); |
408 | int ret; | ||
412 | 409 | ||
413 | if (!zdev || devfn != ZPCI_DEVFN) | 410 | if (!zdev || devfn != ZPCI_DEVFN) |
414 | return 0; | 411 | ret = -ENODEV; |
415 | return zpci_cfg_load(zdev, where, val, size); | 412 | else |
413 | ret = zpci_cfg_load(zdev, where, val, size); | ||
414 | |||
415 | return ret; | ||
416 | } | 416 | } |
417 | 417 | ||
418 | static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, | 418 | static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, |
419 | int size, u32 val) | 419 | int size, u32 val) |
420 | { | 420 | { |
421 | struct zpci_dev *zdev = get_zdev_by_bus(bus); | 421 | struct zpci_dev *zdev = get_zdev_by_bus(bus); |
422 | int ret; | ||
422 | 423 | ||
423 | if (!zdev || devfn != ZPCI_DEVFN) | 424 | if (!zdev || devfn != ZPCI_DEVFN) |
424 | return 0; | 425 | ret = -ENODEV; |
425 | return zpci_cfg_store(zdev, where, val, size); | 426 | else |
427 | ret = zpci_cfg_store(zdev, where, val, size); | ||
428 | |||
429 | return ret; | ||
426 | } | 430 | } |
427 | 431 | ||
428 | static struct pci_ops pci_root_ops = { | 432 | static struct pci_ops pci_root_ops = { |
@@ -474,7 +478,7 @@ scan: | |||
474 | } | 478 | } |
475 | 479 | ||
476 | /* enable interrupts again */ | 480 | /* enable interrupts again */ |
477 | sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); | 481 | set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); |
478 | 482 | ||
479 | /* check again to not lose initiative */ | 483 | /* check again to not lose initiative */ |
480 | rmb(); | 484 | rmb(); |
@@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev) | |||
596 | } | 600 | } |
597 | }; | 601 | }; |
598 | 602 | ||
599 | static void zpci_unmap_resources(struct pci_dev *pdev) | ||
600 | { | ||
601 | resource_size_t len; | ||
602 | int i; | ||
603 | |||
604 | for (i = 0; i < PCI_BAR_COUNT; i++) { | ||
605 | len = pci_resource_len(pdev, i); | ||
606 | if (!len) | ||
607 | continue; | ||
608 | pci_iounmap(pdev, (void *) pdev->resource[i].start); | ||
609 | } | ||
610 | }; | ||
611 | |||
612 | struct zpci_dev *zpci_alloc_device(void) | 603 | struct zpci_dev *zpci_alloc_device(void) |
613 | { | 604 | { |
614 | struct zpci_dev *zdev; | 605 | struct zpci_dev *zdev; |
@@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev) | |||
636 | kfree(zdev); | 627 | kfree(zdev); |
637 | } | 628 | } |
638 | 629 | ||
639 | /* Called on removal of pci_dev, leaves zpci and bus device */ | ||
640 | static void zpci_remove_device(struct pci_dev *pdev) | ||
641 | { | ||
642 | struct zpci_dev *zdev = get_zdev(pdev); | ||
643 | |||
644 | dev_info(&pdev->dev, "Removing device %u\n", zdev->domain); | ||
645 | zdev->state = ZPCI_FN_STATE_CONFIGURED; | ||
646 | zpci_dma_exit_device(zdev); | ||
647 | zpci_fmb_disable_device(zdev); | ||
648 | zpci_sysfs_remove_device(&pdev->dev); | ||
649 | zpci_unmap_resources(pdev); | ||
650 | list_del(&zdev->entry); /* can be called from init */ | ||
651 | zdev->pdev = NULL; | ||
652 | } | ||
653 | |||
654 | static void zpci_scan_devices(void) | ||
655 | { | ||
656 | struct zpci_dev *zdev; | ||
657 | |||
658 | mutex_lock(&zpci_list_lock); | ||
659 | list_for_each_entry(zdev, &zpci_list, entry) | ||
660 | if (zdev->state == ZPCI_FN_STATE_CONFIGURED) | ||
661 | zpci_scan_device(zdev); | ||
662 | mutex_unlock(&zpci_list_lock); | ||
663 | } | ||
664 | |||
665 | /* | 630 | /* |
666 | * Too late for any s390 specific setup, since interrupts must be set up | 631 | * Too late for any s390 specific setup, since interrupts must be set up |
667 | * already which requires DMA setup too and the pci scan will access the | 632 | * already which requires DMA setup too and the pci scan will access the |
@@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) | |||
688 | return 0; | 653 | return 0; |
689 | } | 654 | } |
690 | 655 | ||
691 | void pcibios_disable_device(struct pci_dev *pdev) | ||
692 | { | ||
693 | zpci_remove_device(pdev); | ||
694 | pdev->sysdata = NULL; | ||
695 | } | ||
696 | |||
697 | int pcibios_add_platform_entries(struct pci_dev *pdev) | 656 | int pcibios_add_platform_entries(struct pci_dev *pdev) |
698 | { | 657 | { |
699 | return zpci_sysfs_add_device(&pdev->dev); | 658 | return zpci_sysfs_add_device(&pdev->dev); |
@@ -789,7 +748,7 @@ static int __init zpci_irq_init(void) | |||
789 | spin_lock_init(&bucket->lock); | 748 | spin_lock_init(&bucket->lock); |
790 | /* set summary to 1 to be called every time for the ISC */ | 749 | /* set summary to 1 to be called every time for the ISC */ |
791 | *zpci_irq_si = 1; | 750 | *zpci_irq_si = 1; |
792 | sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); | 751 | set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); |
793 | return 0; | 752 | return 0; |
794 | 753 | ||
795 | out_ai: | 754 | out_ai: |
@@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry) | |||
872 | spin_unlock(&zpci_iomap_lock); | 831 | spin_unlock(&zpci_iomap_lock); |
873 | } | 832 | } |
874 | 833 | ||
875 | static int zpci_create_device_bus(struct zpci_dev *zdev) | 834 | int pcibios_add_device(struct pci_dev *pdev) |
835 | { | ||
836 | struct zpci_dev *zdev = get_zdev(pdev); | ||
837 | |||
838 | zdev->pdev = pdev; | ||
839 | zpci_debug_init_device(zdev); | ||
840 | zpci_fmb_enable_device(zdev); | ||
841 | zpci_map_resources(zdev); | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | static int zpci_scan_bus(struct zpci_dev *zdev) | ||
876 | { | 847 | { |
877 | struct resource *res; | 848 | struct resource *res; |
878 | LIST_HEAD(resources); | 849 | LIST_HEAD(resources); |
@@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev) | |||
909 | pci_add_resource(&resources, res); | 880 | pci_add_resource(&resources, res); |
910 | } | 881 | } |
911 | 882 | ||
912 | zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, | 883 | zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, |
913 | zdev, &resources); | 884 | zdev, &resources); |
914 | if (!zdev->bus) | 885 | if (!zdev->bus) |
915 | return -EIO; | 886 | return -EIO; |
916 | 887 | ||
@@ -959,6 +930,13 @@ out: | |||
959 | } | 930 | } |
960 | EXPORT_SYMBOL_GPL(zpci_enable_device); | 931 | EXPORT_SYMBOL_GPL(zpci_enable_device); |
961 | 932 | ||
933 | int zpci_disable_device(struct zpci_dev *zdev) | ||
934 | { | ||
935 | zpci_dma_exit_device(zdev); | ||
936 | return clp_disable_fh(zdev); | ||
937 | } | ||
938 | EXPORT_SYMBOL_GPL(zpci_disable_device); | ||
939 | |||
962 | int zpci_create_device(struct zpci_dev *zdev) | 940 | int zpci_create_device(struct zpci_dev *zdev) |
963 | { | 941 | { |
964 | int rc; | 942 | int rc; |
@@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev) | |||
967 | if (rc) | 945 | if (rc) |
968 | goto out; | 946 | goto out; |
969 | 947 | ||
970 | rc = zpci_create_device_bus(zdev); | 948 | if (zdev->state == ZPCI_FN_STATE_CONFIGURED) { |
949 | rc = zpci_enable_device(zdev); | ||
950 | if (rc) | ||
951 | goto out_free; | ||
952 | |||
953 | zdev->state = ZPCI_FN_STATE_ONLINE; | ||
954 | } | ||
955 | rc = zpci_scan_bus(zdev); | ||
971 | if (rc) | 956 | if (rc) |
972 | goto out_bus; | 957 | goto out_disable; |
973 | 958 | ||
974 | mutex_lock(&zpci_list_lock); | 959 | mutex_lock(&zpci_list_lock); |
975 | list_add_tail(&zdev->entry, &zpci_list); | 960 | list_add_tail(&zdev->entry, &zpci_list); |
@@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev) | |||
977 | hotplug_ops->create_slot(zdev); | 962 | hotplug_ops->create_slot(zdev); |
978 | mutex_unlock(&zpci_list_lock); | 963 | mutex_unlock(&zpci_list_lock); |
979 | 964 | ||
980 | if (zdev->state == ZPCI_FN_STATE_STANDBY) | ||
981 | return 0; | ||
982 | |||
983 | rc = zpci_enable_device(zdev); | ||
984 | if (rc) | ||
985 | goto out_start; | ||
986 | return 0; | 965 | return 0; |
987 | 966 | ||
988 | out_start: | 967 | out_disable: |
989 | mutex_lock(&zpci_list_lock); | 968 | if (zdev->state == ZPCI_FN_STATE_ONLINE) |
990 | list_del(&zdev->entry); | 969 | zpci_disable_device(zdev); |
991 | if (hotplug_ops) | 970 | out_free: |
992 | hotplug_ops->remove_slot(zdev); | ||
993 | mutex_unlock(&zpci_list_lock); | ||
994 | out_bus: | ||
995 | zpci_free_domain(zdev); | 971 | zpci_free_domain(zdev); |
996 | out: | 972 | out: |
997 | return rc; | 973 | return rc; |
@@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev) | |||
1016 | goto out; | 992 | goto out; |
1017 | } | 993 | } |
1018 | 994 | ||
1019 | zpci_debug_init_device(zdev); | ||
1020 | zpci_fmb_enable_device(zdev); | ||
1021 | zpci_map_resources(zdev); | ||
1022 | pci_bus_add_devices(zdev->bus); | 995 | pci_bus_add_devices(zdev->bus); |
1023 | 996 | ||
1024 | /* now that pdev was added to the bus mark it as used */ | ||
1025 | zdev->state = ZPCI_FN_STATE_ONLINE; | ||
1026 | return 0; | 997 | return 0; |
1027 | |||
1028 | out: | 998 | out: |
1029 | zpci_dma_exit_device(zdev); | 999 | zpci_dma_exit_device(zdev); |
1030 | clp_disable_fh(zdev); | 1000 | clp_disable_fh(zdev); |
@@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void) | |||
1087 | } | 1057 | } |
1088 | EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); | 1058 | EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); |
1089 | 1059 | ||
1090 | unsigned int s390_pci_probe = 1; | 1060 | unsigned int s390_pci_probe; |
1091 | EXPORT_SYMBOL_GPL(s390_pci_probe); | 1061 | EXPORT_SYMBOL_GPL(s390_pci_probe); |
1092 | 1062 | ||
1093 | char * __init pcibios_setup(char *str) | 1063 | char * __init pcibios_setup(char *str) |
1094 | { | 1064 | { |
1095 | if (!strcmp(str, "off")) { | 1065 | if (!strcmp(str, "on")) { |
1096 | s390_pci_probe = 0; | 1066 | s390_pci_probe = 1; |
1097 | return NULL; | 1067 | return NULL; |
1098 | } | 1068 | } |
1099 | return str; | 1069 | return str; |
@@ -1138,7 +1108,6 @@ static int __init pci_base_init(void) | |||
1138 | if (rc) | 1108 | if (rc) |
1139 | goto out_find; | 1109 | goto out_find; |
1140 | 1110 | ||
1141 | zpci_scan_devices(); | ||
1142 | return 0; | 1111 | return 0; |
1143 | 1112 | ||
1144 | out_find: | 1113 | out_find: |
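zpci_create_device() now enables a configured function before scanning the bus, so the error unwind has to undo the enable only when it actually happened. The hunk uses the usual reverse-order goto labels; a compact sketch of that pattern with stubbed setup steps and an error forced in the last one:

#include <stdio.h>
#include <errno.h>

/* Stubbed setup steps; tweak the return values to exercise the unwind. */
static int alloc_domain(void)	{ return 0; }
static int enable_fn(void)	{ return 0; }
static void disable_fn(void)	{ puts("undo: disable_fn"); }
static int scan_bus(void)	{ return -EIO; }	/* force the error path */
static void free_domain(void)	{ puts("undo: free_domain"); }

static int create_device(int configured)
{
	int rc;

	rc = alloc_domain();
	if (rc)
		goto out;
	if (configured) {
		rc = enable_fn();
		if (rc)
			goto out_free;
	}
	rc = scan_bus();
	if (rc)
		goto out_disable;
	return 0;

out_disable:
	if (configured)			/* only undo what actually happened */
		disable_fn();
out_free:
	free_domain();
out:
	return rc;
}

int main(void)
{
	printf("rc = %d\n", create_device(1));
	return 0;
}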
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index f339fe2feb15..bd34359d1546 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <asm/pci_debug.h> | ||
16 | #include <asm/pci_clp.h> | 17 | #include <asm/pci_clp.h> |
17 | 18 | ||
18 | /* | 19 | /* |
@@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured) | |||
144 | struct zpci_dev *zdev; | 145 | struct zpci_dev *zdev; |
145 | int rc; | 146 | int rc; |
146 | 147 | ||
148 | zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured); | ||
147 | zdev = zpci_alloc_device(); | 149 | zdev = zpci_alloc_device(); |
148 | if (IS_ERR(zdev)) | 150 | if (IS_ERR(zdev)) |
149 | return PTR_ERR(zdev); | 151 | return PTR_ERR(zdev); |
@@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) | |||
204 | if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) | 206 | if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) |
205 | *fh = rrb->response.fh; | 207 | *fh = rrb->response.fh; |
206 | else { | 208 | else { |
207 | pr_err("Set PCI FN failed with response: %x cc: %d\n", | 209 | zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc, |
208 | rrb->response.hdr.rsp, rc); | 210 | rrb->response.hdr.rsp); |
209 | rc = -EIO; | 211 | rc = -EIO; |
210 | } | 212 | } |
211 | clp_free_block(rrb); | 213 | clp_free_block(rrb); |
@@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as) | |||
221 | if (!rc) | 223 | if (!rc) |
222 | /* Success -> store enabled handle in zdev */ | 224 | /* Success -> store enabled handle in zdev */ |
223 | zdev->fh = fh; | 225 | zdev->fh = fh; |
226 | |||
227 | zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); | ||
224 | return rc; | 228 | return rc; |
225 | } | 229 | } |
226 | 230 | ||
@@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev) | |||
237 | if (!rc) | 241 | if (!rc) |
238 | /* Success -> store disabled handle in zdev */ | 242 | /* Success -> store disabled handle in zdev */ |
239 | zdev->fh = fh; | 243 | zdev->fh = fh; |
240 | else | 244 | |
241 | dev_err(&zdev->pdev->dev, | 245 | zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); |
242 | "Failed to disable fn handle: 0x%x\n", fh); | ||
243 | return rc; | 246 | return rc; |
244 | } | 247 | } |
245 | 248 | ||
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c index a5d07bc2a547..771b82359af4 100644 --- a/arch/s390/pci/pci_debug.c +++ b/arch/s390/pci/pci_debug.c | |||
@@ -11,12 +11,17 @@ | |||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
14 | #include <linux/export.h> | ||
14 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
15 | #include <asm/debug.h> | 16 | #include <asm/debug.h> |
16 | 17 | ||
17 | #include <asm/pci_dma.h> | 18 | #include <asm/pci_dma.h> |
18 | 19 | ||
19 | static struct dentry *debugfs_root; | 20 | static struct dentry *debugfs_root; |
21 | debug_info_t *pci_debug_msg_id; | ||
22 | EXPORT_SYMBOL_GPL(pci_debug_msg_id); | ||
23 | debug_info_t *pci_debug_err_id; | ||
24 | EXPORT_SYMBOL_GPL(pci_debug_err_id); | ||
20 | 25 | ||
21 | static char *pci_perf_names[] = { | 26 | static char *pci_perf_names[] = { |
22 | /* hardware counters */ | 27 | /* hardware counters */ |
@@ -168,7 +173,6 @@ int __init zpci_debug_init(void) | |||
168 | return -EINVAL; | 173 | return -EINVAL; |
169 | debug_register_view(pci_debug_msg_id, &debug_sprintf_view); | 174 | debug_register_view(pci_debug_msg_id, &debug_sprintf_view); |
170 | debug_set_level(pci_debug_msg_id, 3); | 175 | debug_set_level(pci_debug_msg_id, 3); |
171 | zpci_dbg("Debug view initialized\n"); | ||
172 | 176 | ||
173 | /* error log */ | 177 | /* error log */ |
174 | pci_debug_err_id = debug_register("pci_error", 2, 1, 16); | 178 | pci_debug_err_id = debug_register("pci_error", 2, 1, 16); |
@@ -176,7 +180,6 @@ int __init zpci_debug_init(void) | |||
176 | return -EINVAL; | 180 | return -EINVAL; |
177 | debug_register_view(pci_debug_err_id, &debug_hex_ascii_view); | 181 | debug_register_view(pci_debug_err_id, &debug_hex_ascii_view); |
178 | debug_set_level(pci_debug_err_id, 6); | 182 | debug_set_level(pci_debug_err_id, 6); |
179 | zpci_err("Debug view initialized\n"); | ||
180 | 183 | ||
181 | debugfs_root = debugfs_create_dir("pci", NULL); | 184 | debugfs_root = debugfs_create_dir("pci", NULL); |
182 | return 0; | 185 | return 0; |
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index a547419907c3..f8e69d5bc0a9 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
@@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, | |||
169 | * needs to be redone! | 169 | * needs to be redone! |
170 | */ | 170 | */ |
171 | goto no_refresh; | 171 | goto no_refresh; |
172 | rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr, | 172 | |
173 | nr_pages * PAGE_SIZE); | 173 | rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, |
174 | nr_pages * PAGE_SIZE); | ||
174 | 175 | ||
175 | no_refresh: | 176 | no_refresh: |
176 | spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); | 177 | spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); |
@@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, | |||
268 | int flags = ZPCI_PTE_VALID; | 269 | int flags = ZPCI_PTE_VALID; |
269 | dma_addr_t dma_addr; | 270 | dma_addr_t dma_addr; |
270 | 271 | ||
271 | WARN_ON_ONCE(offset > PAGE_SIZE); | ||
272 | |||
273 | /* This rounds up number of pages based on size and offset */ | 272 | /* This rounds up number of pages based on size and offset */ |
274 | nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); | 273 | nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); |
275 | iommu_page_index = dma_alloc_iommu(zdev, nr_pages); | 274 | iommu_page_index = dma_alloc_iommu(zdev, nr_pages); |
@@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, | |||
291 | 290 | ||
292 | if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { | 291 | if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { |
293 | atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); | 292 | atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); |
294 | return dma_addr + offset; | 293 | return dma_addr + (offset & ~PAGE_MASK); |
295 | } | 294 | } |
296 | 295 | ||
297 | out_free: | 296 | out_free: |
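The s390_dma_map_pages() fix matters when the offset passed in exceeds a page: iommu_num_pages() already rounded the mapping up to cover those whole pages, so only the sub-page part of the offset may be added to the returned bus address. A small sketch of the two computations, assuming a 4 KB page size and invented addresses:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long dma_addr = 0x80000000UL;	/* bus address of the first mapped page */
	unsigned long offset   = 0x2345;	/* larger than one page */

	/* Old code: adds the whole offset although the mapping already starts
	 * at the page containing the buffer, so the result is off by the
	 * page-aligned part of the offset. */
	printf("broken: %#lx\n", dma_addr + offset);
	/* Fixed code: keep only the in-page part, the rest is covered by the
	 * pages that iommu_num_pages() rounded into the mapping. */
	printf("fixed : %#lx\n", dma_addr + (offset & ~PAGE_MASK));
	return 0;
}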
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c new file mode 100644 index 000000000000..22eeb9d7ffeb --- /dev/null +++ b/arch/s390/pci/pci_insn.c | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * s390 specific pci instructions | ||
3 | * | ||
4 | * Copyright IBM Corp. 2013 | ||
5 | */ | ||
6 | |||
7 | #include <linux/export.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <asm/pci_insn.h> | ||
11 | #include <asm/processor.h> | ||
12 | |||
13 | #define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ | ||
14 | |||
15 | /* Modify PCI Function Controls */ | ||
16 | static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) | ||
17 | { | ||
18 | u8 cc; | ||
19 | |||
20 | asm volatile ( | ||
21 | " .insn rxy,0xe300000000d0,%[req],%[fib]\n" | ||
22 | " ipm %[cc]\n" | ||
23 | " srl %[cc],28\n" | ||
24 | : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib) | ||
25 | : : "cc"); | ||
26 | *status = req >> 24 & 0xff; | ||
27 | return cc; | ||
28 | } | ||
29 | |||
30 | int s390pci_mod_fc(u64 req, struct zpci_fib *fib) | ||
31 | { | ||
32 | u8 cc, status; | ||
33 | |||
34 | do { | ||
35 | cc = __mpcifc(req, fib, &status); | ||
36 | if (cc == 2) | ||
37 | msleep(ZPCI_INSN_BUSY_DELAY); | ||
38 | } while (cc == 2); | ||
39 | |||
40 | if (cc) | ||
41 | printk_once(KERN_ERR "%s: error cc: %d status: %d\n", | ||
42 | __func__, cc, status); | ||
43 | return (cc) ? -EIO : 0; | ||
44 | } | ||
45 | |||
46 | /* Refresh PCI Translations */ | ||
47 | static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) | ||
48 | { | ||
49 | register u64 __addr asm("2") = addr; | ||
50 | register u64 __range asm("3") = range; | ||
51 | u8 cc; | ||
52 | |||
53 | asm volatile ( | ||
54 | " .insn rre,0xb9d30000,%[fn],%[addr]\n" | ||
55 | " ipm %[cc]\n" | ||
56 | " srl %[cc],28\n" | ||
57 | : [cc] "=d" (cc), [fn] "+d" (fn) | ||
58 | : [addr] "d" (__addr), "d" (__range) | ||
59 | : "cc"); | ||
60 | *status = fn >> 24 & 0xff; | ||
61 | return cc; | ||
62 | } | ||
63 | |||
64 | int s390pci_refresh_trans(u64 fn, u64 addr, u64 range) | ||
65 | { | ||
66 | u8 cc, status; | ||
67 | |||
68 | do { | ||
69 | cc = __rpcit(fn, addr, range, &status); | ||
70 | if (cc == 2) | ||
71 | udelay(ZPCI_INSN_BUSY_DELAY); | ||
72 | } while (cc == 2); | ||
73 | |||
74 | if (cc) | ||
75 | printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n", | ||
76 | __func__, cc, status, addr, range); | ||
77 | return (cc) ? -EIO : 0; | ||
78 | } | ||
79 | |||
80 | /* Set Interruption Controls */ | ||
81 | void set_irq_ctrl(u16 ctl, char *unused, u8 isc) | ||
82 | { | ||
83 | asm volatile ( | ||
84 | " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" | ||
85 | : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); | ||
86 | } | ||
87 | |||
88 | /* PCI Load */ | ||
89 | static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status) | ||
90 | { | ||
91 | register u64 __req asm("2") = req; | ||
92 | register u64 __offset asm("3") = offset; | ||
93 | int cc = -ENXIO; | ||
94 | u64 __data; | ||
95 | |||
96 | asm volatile ( | ||
97 | " .insn rre,0xb9d20000,%[data],%[req]\n" | ||
98 | "0: ipm %[cc]\n" | ||
99 | " srl %[cc],28\n" | ||
100 | "1:\n" | ||
101 | EX_TABLE(0b, 1b) | ||
102 | : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req) | ||
103 | : "d" (__offset) | ||
104 | : "cc"); | ||
105 | *status = __req >> 24 & 0xff; | ||
106 | if (!cc) | ||
107 | *data = __data; | ||
108 | |||
109 | return cc; | ||
110 | } | ||
111 | |||
112 | int s390pci_load(u64 *data, u64 req, u64 offset) | ||
113 | { | ||
114 | u8 status; | ||
115 | int cc; | ||
116 | |||
117 | do { | ||
118 | cc = __pcilg(data, req, offset, &status); | ||
119 | if (cc == 2) | ||
120 | udelay(ZPCI_INSN_BUSY_DELAY); | ||
121 | } while (cc == 2); | ||
122 | |||
123 | if (cc) | ||
124 | printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", | ||
125 | __func__, cc, status, req, offset); | ||
126 | return (cc > 0) ? -EIO : cc; | ||
127 | } | ||
128 | EXPORT_SYMBOL_GPL(s390pci_load); | ||
129 | |||
130 | /* PCI Store */ | ||
131 | static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) | ||
132 | { | ||
133 | register u64 __req asm("2") = req; | ||
134 | register u64 __offset asm("3") = offset; | ||
135 | int cc = -ENXIO; | ||
136 | |||
137 | asm volatile ( | ||
138 | " .insn rre,0xb9d00000,%[data],%[req]\n" | ||
139 | "0: ipm %[cc]\n" | ||
140 | " srl %[cc],28\n" | ||
141 | "1:\n" | ||
142 | EX_TABLE(0b, 1b) | ||
143 | : [cc] "+d" (cc), [req] "+d" (__req) | ||
144 | : "d" (__offset), [data] "d" (data) | ||
145 | : "cc"); | ||
146 | *status = __req >> 24 & 0xff; | ||
147 | return cc; | ||
148 | } | ||
149 | |||
150 | int s390pci_store(u64 data, u64 req, u64 offset) | ||
151 | { | ||
152 | u8 status; | ||
153 | int cc; | ||
154 | |||
155 | do { | ||
156 | cc = __pcistg(data, req, offset, &status); | ||
157 | if (cc == 2) | ||
158 | udelay(ZPCI_INSN_BUSY_DELAY); | ||
159 | } while (cc == 2); | ||
160 | |||
161 | if (cc) | ||
162 | printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", | ||
163 | __func__, cc, status, req, offset); | ||
164 | return (cc > 0) ? -EIO : cc; | ||
165 | } | ||
166 | EXPORT_SYMBOL_GPL(s390pci_store); | ||
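The store direction mirrors this. A sketch of a 4-byte write through s390pci_store(), under the same assumptions about the request word and right-justified data; again the helper name is editorial:

	/* Editorial sketch only, not part of the patch. */
	static int zpci_write_u32(u64 req, u64 offset, u32 val)
	{
		return s390pci_store(val, req, offset);
	}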
167 | |||
168 | /* PCI Store Block */ | ||
169 | static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) | ||
170 | { | ||
171 | int cc = -ENXIO; | ||
172 | |||
173 | asm volatile ( | ||
174 | " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" | ||
175 | "0: ipm %[cc]\n" | ||
176 | " srl %[cc],28\n" | ||
177 | "1:\n" | ||
178 | EX_TABLE(0b, 1b) | ||
179 | : [cc] "+d" (cc), [req] "+d" (req) | ||
180 | : [offset] "d" (offset), [data] "Q" (*data) | ||
181 | : "cc"); | ||
182 | *status = req >> 24 & 0xff; | ||
183 | return cc; | ||
184 | } | ||
185 | |||
186 | int s390pci_store_block(const u64 *data, u64 req, u64 offset) | ||
187 | { | ||
188 | u8 status; | ||
189 | int cc; | ||
190 | |||
191 | do { | ||
192 | cc = __pcistb(data, req, offset, &status); | ||
193 | if (cc == 2) | ||
194 | udelay(ZPCI_INSN_BUSY_DELAY); | ||
195 | } while (cc == 2); | ||
196 | |||
197 | if (cc) | ||
198 | printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", | ||
199 | __func__, cc, status, req, offset); | ||
200 | return (cc > 0) ? -EIO : cc; | ||
201 | } | ||
202 | EXPORT_SYMBOL_GPL(s390pci_store_block); | ||
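Editor's note: the three load/store helpers preset cc to -ENXIO and register an EX_TABLE entry spanning the PCI instruction. If the instruction traps (for example because the facility is not installed), execution resumes at label 1, the ipm/srl pair never runs, and the wrapper's final return statement hands -ENXIO straight back to the caller; a non-zero condition code becomes -EIO instead, with the status byte already logged once. A small sketch of how a caller might report the two cases; the function name and messages are editorial:

	/* Editorial sketch only, not part of the patch. */
	static void zpci_report_rc(const char *what, int rc)
	{
		if (rc == -ENXIO)
			pr_warn("%s: PCI instruction trapped (facility unavailable?)\n", what);
		else if (rc == -EIO)
			pr_warn("%s: PCI instruction completed with an error\n", what);
	}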
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c index 0297931335e1..b097aed05a9b 100644 --- a/arch/s390/pci/pci_msi.c +++ b/arch/s390/pci/pci_msi.c | |||
@@ -18,8 +18,9 @@ | |||
18 | 18 | ||
19 | /* mapping of irq numbers to msi_desc */ | 19 | /* mapping of irq numbers to msi_desc */ |
20 | static struct hlist_head *msi_hash; | 20 | static struct hlist_head *msi_hash; |
21 | static unsigned int msihash_shift = 6; | 21 | static const unsigned int msi_hash_bits = 8; |
22 | #define msi_hashfn(nr) hash_long(nr, msihash_shift) | 22 | #define MSI_HASH_BUCKETS (1U << msi_hash_bits) |
23 | #define msi_hashfn(nr) hash_long(nr, msi_hash_bits) | ||
23 | 24 | ||
24 | static DEFINE_SPINLOCK(msi_map_lock); | 25 | static DEFINE_SPINLOCK(msi_map_lock); |
25 | 26 | ||
@@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi, | |||
74 | map->irq = nr; | 75 | map->irq = nr; |
75 | map->msi = msi; | 76 | map->msi = msi; |
76 | zdev->msi_map[nr & ZPCI_MSI_MASK] = map; | 77 | zdev->msi_map[nr & ZPCI_MSI_MASK] = map; |
78 | INIT_HLIST_NODE(&map->msi_chain); | ||
77 | 79 | ||
78 | pr_debug("%s hashing irq: %u to bucket nr: %llu\n", | 80 | pr_debug("%s hashing irq: %u to bucket nr: %llu\n", |
79 | __func__, nr, msi_hashfn(nr)); | 81 | __func__, nr, msi_hashfn(nr)); |
@@ -125,11 +127,11 @@ int __init zpci_msihash_init(void) | |||
125 | { | 127 | { |
126 | unsigned int i; | 128 | unsigned int i; |
127 | 129 | ||
128 | msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL); | 130 | msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL); |
129 | if (!msi_hash) | 131 | if (!msi_hash) |
130 | return -ENOMEM; | 132 | return -ENOMEM; |
131 | 133 | ||
132 | for (i = 0; i < (1U << msihash_shift); i++) | 134 | for (i = 0; i < MSI_HASH_BUCKETS; i++) |
133 | INIT_HLIST_HEAD(&msi_hash[i]); | 135 | INIT_HLIST_HEAD(&msi_hash[i]); |
134 | return 0; | 136 | return 0; |
135 | } | 137 | } |
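Editor's note: with msi_hash_bits at 8, the allocation, initialisation and hash function now all agree on MSI_HASH_BUCKETS (256) buckets, and each map's msi_chain node is explicitly initialised when the map is set up. A lookup then walks a single bucket. A minimal sketch, assuming the map objects are of a type (struct msi_map here) carrying the irq, msi and msi_chain members used in the hunk above, and that the caller holds whatever lock or RCU protection guards the hash (omitted here); the function name is hypothetical:

	/* Editorial sketch only, not part of the patch. */
	static struct msi_desc *zpci_msi_lookup(unsigned int irq)
	{
		struct msi_map *map;

		hlist_for_each_entry(map, &msi_hash[msi_hashfn(irq)], msi_chain)
			if (map->irq == irq)
				return map->msi;
		return NULL;
	}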