author		Paul Mackerras <paulus@samba.org>	2008-01-30 19:25:51 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-01-30 19:25:51 -0500
commit		bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree		5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /arch/s390
parent		4eece4ccf997c0e6d8fdad3d842e37b16b8d705f (diff)
parent		5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig              |    3
-rw-r--r--  arch/s390/crypto/Kconfig       |   60
-rw-r--r--  arch/s390/crypto/aes_s390.c    |  229
-rw-r--r--  arch/s390/crypto/prng.c        |    4
-rw-r--r--  arch/s390/hypfs/inode.c        |   13
-rw-r--r--  arch/s390/kernel/Makefile      |    4
-rw-r--r--  arch/s390/kernel/early.c       |    2
-rw-r--r--  arch/s390/kernel/head64.S      |    2
-rw-r--r--  arch/s390/kernel/ipl.c         | 1035
-rw-r--r--  arch/s390/kernel/process.c     |   18
-rw-r--r--  arch/s390/kernel/ptrace.c      |   15
-rw-r--r--  arch/s390/kernel/setup.c       |  139
-rw-r--r--  arch/s390/kernel/signal.c      |   20
-rw-r--r--  arch/s390/kernel/smp.c         |  575
-rw-r--r--  arch/s390/kernel/time.c        |    2
-rw-r--r--  arch/s390/kernel/traps.c       |   20
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S |   20
-rw-r--r--  arch/s390/lib/spinlock.c       |   35
-rw-r--r--  arch/s390/mm/extmem.c          |    2
-rw-r--r--  arch/s390/mm/vmem.c            |   26
20 files changed, 1425 insertions(+), 799 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1330061020ab..6ef54d27fc00 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -276,9 +276,6 @@ source "kernel/Kconfig.preempt"
 
 source "mm/Kconfig"
 
-config HOLES_IN_ZONE
-	def_bool y
-
 comment "I/O subsystem configuration"
 
 config MACHCHK_WARNING
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
deleted file mode 100644
index d1defbbfcd81..000000000000
--- a/arch/s390/crypto/Kconfig
+++ /dev/null
@@ -1,60 +0,0 @@
-config CRYPTO_SHA1_S390
-	tristate "SHA1 digest algorithm"
-	depends on S390
-	select CRYPTO_ALGAPI
-	help
-	  This is the s390 hardware accelerated implementation of the
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
-config CRYPTO_SHA256_S390
-	tristate "SHA256 digest algorithm"
-	depends on S390
-	select CRYPTO_ALGAPI
-	help
-	  This is the s390 hardware accelerated implementation of the
-	  SHA256 secure hash standard (DFIPS 180-2).
-
-	  This version of SHA implements a 256 bit hash with 128 bits of
-	  security against collision attacks.
-
-config CRYPTO_DES_S390
-	tristate "DES and Triple DES cipher algorithms"
-	depends on S390
-	select CRYPTO_ALGAPI
-	select CRYPTO_BLKCIPHER
-	help
-	  This is the s390 hardware accelerated implementation of the
-	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
-
-config CRYPTO_AES_S390
-	tristate "AES cipher algorithms"
-	depends on S390
-	select CRYPTO_ALGAPI
-	select CRYPTO_BLKCIPHER
-	help
-	  This is the s390 hardware accelerated implementation of the
-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-	  algorithm.
-
-	  Rijndael appears to be consistently a very good performer in
-	  both hardware and software across a wide range of computing
-	  environments regardless of its use in feedback or non-feedback
-	  modes. Its key setup time is excellent, and its key agility is
-	  good. Rijndael's very low memory requirements make it very well
-	  suited for restricted-space environments, in which it also
-	  demonstrates excellent performance. Rijndael's operations are
-	  among the easiest to defend against power and timing attacks.
-
-	  On s390 the System z9-109 currently only supports the key size
-	  of 128 bit.
-
-config S390_PRNG
-	tristate "Pseudo random number generator device driver"
-	depends on S390
-	default "m"
-	help
-	  Select this option if you want to use the s390 pseudo random number
-	  generator. The PRNG is part of the cryptographic processor functions
-	  and uses triple-DES to generate secure random numbers like the
-	  ANSI X9.17 standard. The PRNG is usable via the char device
-	  /dev/prandom.
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 512669691ad0..a3f67f8b5427 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -6,6 +6,7 @@
  * s390 Version:
  *   Copyright IBM Corp. 2005,2007
  *   Author(s): Jan Glauber (jang@de.ibm.com)
+ *		Sebastian Siewior (sebastian@breakpoint.cc) SW-Fallback
  *
  * Derived from "crypto/aes_generic.c"
  *
@@ -16,17 +17,13 @@
  *
  */
 
+#include <crypto/aes.h>
 #include <crypto/algapi.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include "crypt_s390.h"
 
-#define AES_MIN_KEY_SIZE	16
-#define AES_MAX_KEY_SIZE	32
-
-/* data block size for all key lengths */
-#define AES_BLOCK_SIZE		16
-
 #define AES_KEYLEN_128		1
 #define AES_KEYLEN_192		2
 #define AES_KEYLEN_256		4
@@ -39,45 +36,89 @@ struct s390_aes_ctx {
 	long enc;
 	long dec;
 	int key_len;
+	union {
+		struct crypto_blkcipher *blk;
+		struct crypto_cipher *cip;
+	} fallback;
 };
 
-static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len)
+/*
+ * Check if the key_len is supported by the HW.
+ * Returns 0 if it is, a positive number if it is not and software fallback is
+ * required or a negative number in case the key size is not valid
+ */
+static int need_fallback(unsigned int key_len)
 {
-	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
-
 	switch (key_len) {
 	case 16:
 		if (!(keylen_flag & AES_KEYLEN_128))
-			goto fail;
+			return 1;
 		break;
 	case 24:
 		if (!(keylen_flag & AES_KEYLEN_192))
-			goto fail;
-
+			return 1;
 		break;
 	case 32:
 		if (!(keylen_flag & AES_KEYLEN_256))
-			goto fail;
+			return 1;
 		break;
 	default:
-		goto fail;
+		return -1;
 		break;
 	}
+	return 0;
+}
+
+static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
+		unsigned int key_len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+			CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
+				CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		unsigned int key_len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
+	int ret;
+
+	ret = need_fallback(key_len);
+	if (ret < 0) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
 
 	sctx->key_len = key_len;
-	memcpy(sctx->key, in_key, key_len);
-	return 0;
-fail:
-	*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-	return -EINVAL;
+	if (!ret) {
+		memcpy(sctx->key, in_key, key_len);
+		return 0;
+	}
+
+	return setkey_fallback_cip(tfm, in_key, key_len);
 }
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
+	if (unlikely(need_fallback(sctx->key_len))) {
+		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
+		return;
+	}
+
 	switch (sctx->key_len) {
 	case 16:
 		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
@@ -98,6 +139,11 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
+	if (unlikely(need_fallback(sctx->key_len))) {
+		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
+		return;
+	}
+
 	switch (sctx->key_len) {
 	case 16:
 		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
@@ -114,6 +160,29 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	}
 }
 
+static int fallback_init_cip(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(sctx->fallback.cip)) {
+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(sctx->fallback.cip);
+	}
+
+	return 0;
+}
+
+static void fallback_exit_cip(struct crypto_tfm *tfm)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(sctx->fallback.cip);
+	sctx->fallback.cip = NULL;
+}
 
 static struct crypto_alg aes_alg = {
 	.cra_name		=	"aes",
@@ -125,6 +194,8 @@ static struct crypto_alg aes_alg = {
 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_init		=	fallback_init_cip,
+	.cra_exit		=	fallback_exit_cip,
 	.cra_u			=	{
 		.cipher = {
 			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
@@ -136,10 +207,70 @@ static struct crypto_alg aes_alg = {
 	}
 };
 
+static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	unsigned int ret;
+
+	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+			CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
+				CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int fallback_blk_dec(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	unsigned int ret;
+	struct crypto_blkcipher *tfm;
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+
+	tfm = desc->tfm;
+	desc->tfm = sctx->fallback.blk;
+
+	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+
+static int fallback_blk_enc(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	unsigned int ret;
+	struct crypto_blkcipher *tfm;
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+
+	tfm = desc->tfm;
+	desc->tfm = sctx->fallback.blk;
+
+	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+
 static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	ret = need_fallback(key_len);
+	if (ret > 0) {
+		sctx->key_len = key_len;
+		return setkey_fallback_blk(tfm, in_key, key_len);
+	}
 
 	switch (key_len) {
 	case 16:
@@ -188,6 +319,9 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
+	if (unlikely(need_fallback(sctx->key_len)))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
 }
@@ -199,10 +333,37 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
+	if (unlikely(need_fallback(sctx->key_len)))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
 }
 
+static int fallback_init_blk(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(sctx->fallback.blk)) {
+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(sctx->fallback.blk);
+	}
+
+	return 0;
+}
+
+static void fallback_exit_blk(struct crypto_tfm *tfm)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(sctx->fallback.blk);
+	sctx->fallback.blk = NULL;
+}
+
 static struct crypto_alg ecb_aes_alg = {
 	.cra_name		=	"ecb(aes)",
 	.cra_driver_name	=	"ecb-aes-s390",
@@ -214,6 +375,8 @@ static struct crypto_alg ecb_aes_alg = {
 	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+	.cra_init		=	fallback_init_blk,
+	.cra_exit		=	fallback_exit_blk,
 	.cra_u			=	{
 		.blkcipher = {
 			.min_keysize		=	AES_MIN_KEY_SIZE,
@@ -229,6 +392,13 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	ret = need_fallback(key_len);
+	if (ret > 0) {
+		sctx->key_len = key_len;
+		return setkey_fallback_blk(tfm, in_key, key_len);
+	}
 
 	switch (key_len) {
 	case 16:
@@ -283,6 +453,9 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
+	if (unlikely(need_fallback(sctx->key_len)))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
 }
@@ -294,6 +467,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
+	if (unlikely(need_fallback(sctx->key_len)))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
 }
@@ -309,6 +485,8 @@ static struct crypto_alg cbc_aes_alg = {
 	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
+	.cra_init		=	fallback_init_blk,
+	.cra_exit		=	fallback_exit_blk,
 	.cra_u			=	{
 		.blkcipher = {
 			.min_keysize		=	AES_MIN_KEY_SIZE,
@@ -336,14 +514,10 @@ static int __init aes_init(void)
 		return -EOPNOTSUPP;
 
 	/* z9 109 and z9 BC/EC only support 128 bit key length */
-	if (keylen_flag == AES_KEYLEN_128) {
-		aes_alg.cra_u.cipher.cia_max_keysize = AES_MIN_KEY_SIZE;
-		ecb_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
-		cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
+	if (keylen_flag == AES_KEYLEN_128)
 		printk(KERN_INFO
-		       "aes_s390: hardware acceleration only available for"
+		       "aes_s390: hardware acceleration only available for "
 		       "128 bit keys\n");
-	}
 
 	ret = crypto_register_alg(&aes_alg);
 	if (ret)
@@ -382,4 +556,3 @@ MODULE_ALIAS("aes");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("GPL");
-
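
The aes_s390.c hunks above wire a software fallback into the driver: when the CPACF hardware cannot handle the requested key size (a z9 only accelerates 128-bit AES), the tfm routes through a fallback cipher allocated with CRYPTO_ALG_NEED_FALLBACK. As a minimal sketch, the stand-alone C program below (hypothetical test harness, not part of the patch) mirrors the tri-state dispatch of need_fallback(), assuming a machine that only supports 128-bit keys:

#include <stdio.h>

/* Stand-ins for the crypt_s390 facility flags; in the driver these are
 * probed from the hardware at module init. */
#define AES_KEYLEN_128 1
#define AES_KEYLEN_192 2
#define AES_KEYLEN_256 4

static int keylen_flag = AES_KEYLEN_128;	/* assume z9: 128-bit only */

/* Mirrors need_fallback() from the patch: 0 = hardware path,
 * 1 = software fallback required, -1 = invalid key size. */
static int need_fallback(unsigned int key_len)
{
	switch (key_len) {
	case 16: return (keylen_flag & AES_KEYLEN_128) ? 0 : 1;
	case 24: return (keylen_flag & AES_KEYLEN_192) ? 0 : 1;
	case 32: return (keylen_flag & AES_KEYLEN_256) ? 0 : 1;
	default: return -1;
	}
}

int main(void)
{
	unsigned int sizes[] = { 16, 24, 32, 20 };
	for (unsigned int i = 0; i < 4; i++)
		printf("key_len %u -> %d\n", sizes[i], need_fallback(sizes[i]));
	return 0;	/* expect: 0, 1, 1, -1 */
}

This is why setkey only fails hard on a negative return; a positive return simply defers the key to the fallback tfm, so 192/256-bit keys still work on a z9, just without acceleration.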
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 8eb3a1aedc22..0cfefddd8375 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -90,7 +90,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
 	int ret = 0;
 	int tmp;
 
-	/* nbytes can be arbitrary long, we spilt it into chunks */
+	/* nbytes can be arbitrary length, we split it into chunks */
 	while (nbytes) {
 		/* same as in extract_entropy_user in random.c */
 		if (need_resched()) {
@@ -146,7 +146,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
 	return ret;
 }
 
-static struct file_operations prng_fops = {
+static const struct file_operations prng_fops = {
 	.owner		= THIS_MODULE,
 	.open		= &prng_open,
 	.release	= NULL,
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 5245717295b8..4b010ff814c9 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -490,7 +490,7 @@ static struct super_operations hypfs_s_ops = {
 	.show_options	= hypfs_show_options,
 };
 
-static decl_subsys(s390, NULL, NULL);
+static struct kobject *s390_kobj;
 
 static int __init hypfs_init(void)
 {
@@ -506,17 +506,18 @@ static int __init hypfs_init(void)
 			goto fail_diag;
 		}
 	}
-	kobj_set_kset_s(&s390_subsys, hypervisor_subsys);
-	rc = subsystem_register(&s390_subsys);
-	if (rc)
+	s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
+	if (!s390_kobj) {
+		rc = -ENOMEM;
 		goto fail_sysfs;
+	}
 	rc = register_filesystem(&hypfs_type);
 	if (rc)
 		goto fail_filesystem;
 	return 0;
 
 fail_filesystem:
-	subsystem_unregister(&s390_subsys);
+	kobject_put(s390_kobj);
 fail_sysfs:
 	if (!MACHINE_IS_VM)
 		hypfs_diag_exit();
@@ -530,7 +531,7 @@ static void __exit hypfs_exit(void)
 	if (!MACHINE_IS_VM)
 		hypfs_diag_exit();
 	unregister_filesystem(&hypfs_type);
-	subsystem_unregister(&s390_subsys);
+	kobject_put(s390_kobj);
 }
 
 module_init(hypfs_init)
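
The hypfs hunks are part of the tree-wide migration from the old decl_subsys()/subsystem_register() API to plain kobjects. The sketch below shows the new pattern in isolation (kernel code, builds only in-tree; the "example" directory name is hypothetical, while kobject_create_and_add(), kobject_put() and hypervisor_kobj are the same symbols the patch uses):

#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/module.h>

static struct kobject *example_kobj;

static int __init example_init(void)
{
	/* Replaces decl_subsys()/subsystem_register(): creates
	 * /sys/hypervisor/example, returns NULL on failure. */
	example_kobj = kobject_create_and_add("example", hypervisor_kobj);
	if (!example_kobj)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* Replaces subsystem_unregister(): drops the last reference. */
	kobject_put(example_kobj);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The error path changes accordingly: the old API returned an rc, the new one returns a pointer, which is why the hunk introduces the explicit rc = -ENOMEM on allocation failure.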
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 56cb71007cd9..b3b650a93c7c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -31,7 +31,3 @@ S390_KEXEC_OBJS := machine_kexec.o crash.o
 S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
 obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
 
-#
-# This is just to get the dependencies...
-#
-binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1b3af7dab816..9f7b73b180f0 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -276,7 +276,7 @@ void __init startup_init(void)
 	create_kernel_nss();
 	sort_main_extable();
 	setup_lowcore_early();
-	sclp_readinfo_early();
+	sclp_read_info_early();
 	sclp_facilities_detect();
 	memsize = sclp_memory_detect();
 #ifndef CONFIG_64BIT
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index a87b1976d409..79dccd206a6e 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -157,7 +157,7 @@ startup_continue:
 	.long	0xb2b10000		# store facility list
 	tm	0xc8,0x08		# check bit for clearing-by-ASCE
 	bno	0f-.LPG1(%r13)
-	lhi	%r1,2094
+	lhi	%r1,2048
 	lhi	%r2,0
 	.long	0xb98e2001
 	oi	7(%r12),0x80		# set IDTE flag
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index ce0856d32500..db28cca81fef 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2,7 +2,7 @@
  * arch/s390/kernel/ipl.c
  *   ipl/reipl/dump support for Linux on s390.
  *
- * Copyright (C) IBM Corp. 2005,2006
+ * Copyright IBM Corp. 2005,2007
  *   Author(s): Michael Holzheu <holzheu@de.ibm.com>
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  *		 Volker Sameske <sameske@de.ibm.com>
@@ -31,6 +31,43 @@
 #define IPL_FCP_DUMP_STR	"fcp_dump"
 #define IPL_NSS_STR		"nss"
 
+#define DUMP_CCW_STR		"ccw"
+#define DUMP_FCP_STR		"fcp"
+#define DUMP_NONE_STR		"none"
+
+/*
+ * Four shutdown trigger types are supported:
+ * - panic
+ * - halt
+ * - power off
+ * - reipl
+ */
+#define ON_PANIC_STR		"on_panic"
+#define ON_HALT_STR		"on_halt"
+#define ON_POFF_STR		"on_poff"
+#define ON_REIPL_STR		"on_reboot"
+
+struct shutdown_action;
+struct shutdown_trigger {
+	char *name;
+	struct shutdown_action *action;
+};
+
+/*
+ * Five shutdown action types are supported:
+ */
+#define SHUTDOWN_ACTION_IPL_STR		"ipl"
+#define SHUTDOWN_ACTION_REIPL_STR	"reipl"
+#define SHUTDOWN_ACTION_DUMP_STR	"dump"
+#define SHUTDOWN_ACTION_VMCMD_STR	"vmcmd"
+#define SHUTDOWN_ACTION_STOP_STR	"stop"
+
+struct shutdown_action {
+	char *name;
+	void (*fn) (struct shutdown_trigger *trigger);
+	int (*init) (void);
+};
+
 static char *ipl_type_str(enum ipl_type type)
 {
 	switch (type) {
@@ -54,10 +91,6 @@ enum dump_type {
 	DUMP_TYPE_FCP	= 4,
 };
 
-#define DUMP_NONE_STR "none"
-#define DUMP_CCW_STR "ccw"
-#define DUMP_FCP_STR "fcp"
-
 static char *dump_type_str(enum dump_type type)
 {
 	switch (type) {
@@ -99,30 +132,6 @@ enum dump_method {
 	DUMP_METHOD_FCP_DIAG,
 };
 
-enum shutdown_action {
-	SHUTDOWN_REIPL,
-	SHUTDOWN_DUMP,
-	SHUTDOWN_STOP,
-};
-
-#define SHUTDOWN_REIPL_STR "reipl"
-#define SHUTDOWN_DUMP_STR "dump"
-#define SHUTDOWN_STOP_STR "stop"
-
-static char *shutdown_action_str(enum shutdown_action action)
-{
-	switch (action) {
-	case SHUTDOWN_REIPL:
-		return SHUTDOWN_REIPL_STR;
-	case SHUTDOWN_DUMP:
-		return SHUTDOWN_DUMP_STR;
-	case SHUTDOWN_STOP:
-		return SHUTDOWN_STOP_STR;
-	default:
-		return NULL;
-	}
-}
-
 static int diag308_set_works = 0;
 
 static int reipl_capabilities = IPL_TYPE_UNKNOWN;
@@ -140,8 +149,6 @@ static enum dump_method dump_method = DUMP_METHOD_NONE;
 static struct ipl_parameter_block *dump_block_fcp;
 static struct ipl_parameter_block *dump_block_ccw;
 
-static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
-
 static struct sclp_ipl_info sclp_ipl_info;
 
 int diag308(unsigned long subcode, void *addr)
@@ -162,22 +169,25 @@ EXPORT_SYMBOL_GPL(diag308);
 /* SYSFS */
 
 #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value)		\
-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
+		struct kobj_attribute *attr,				\
 		char *page)						\
 {									\
 	return sprintf(page, _format, _value);				\
 }									\
-static struct subsys_attribute sys_##_prefix##_##_name##_attr =	\
+static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
 	__ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
 
 #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)	\
-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
+		struct kobj_attribute *attr,				\
 		char *page)						\
 {									\
 	return sprintf(page, _fmt_out,					\
 		       (unsigned long long) _value);			\
 }									\
-static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
+		struct kobj_attribute *attr,				\
 		const char *buf, size_t len)				\
 {									\
 	unsigned long long value;					\
@@ -186,25 +196,27 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset, \
 	_value = value;							\
 	return len;							\
 }									\
-static struct subsys_attribute sys_##_prefix##_##_name##_attr =	\
+static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
 	__ATTR(_name,(S_IRUGO | S_IWUSR),				\
 			sys_##_prefix##_##_name##_show,			\
 			sys_##_prefix##_##_name##_store);
 
 #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset,	\
+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
+		struct kobj_attribute *attr,				\
 		char *page)						\
 {									\
 	return sprintf(page, _fmt_out, _value);				\
 }									\
-static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
+		struct kobj_attribute *attr,				\
 		const char *buf, size_t len)				\
 {									\
-	if (sscanf(buf, _fmt_in, _value) != 1)				\
-		return -EINVAL;						\
+	strncpy(_value, buf, sizeof(_value) - 1);			\
+	strstrip(_value);						\
 	return len;							\
 }									\
-static struct subsys_attribute sys_##_prefix##_##_name##_attr =	\
+static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
 	__ATTR(_name,(S_IRUGO | S_IWUSR),				\
 			sys_##_prefix##_##_name##_show,			\
 			sys_##_prefix##_##_name##_store);
@@ -240,44 +252,19 @@ static __init enum ipl_type get_ipl_type(void)
 	return IPL_TYPE_FCP;
 }
 
-void __init setup_ipl_info(void)
-{
-	ipl_info.type = get_ipl_type();
-	switch (ipl_info.type) {
-	case IPL_TYPE_CCW:
-		ipl_info.data.ccw.dev_id.devno = ipl_devno;
-		ipl_info.data.ccw.dev_id.ssid = 0;
-		break;
-	case IPL_TYPE_FCP:
-	case IPL_TYPE_FCP_DUMP:
-		ipl_info.data.fcp.dev_id.devno =
-			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
-		ipl_info.data.fcp.dev_id.ssid = 0;
-		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
-		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
-		break;
-	case IPL_TYPE_NSS:
-		strncpy(ipl_info.data.nss.name, kernel_nss_name,
-			sizeof(ipl_info.data.nss.name));
-		break;
-	case IPL_TYPE_UNKNOWN:
-	default:
-		/* We have no info to copy */
-		break;
-	}
-}
-
 struct ipl_info ipl_info;
 EXPORT_SYMBOL_GPL(ipl_info);
 
-static ssize_t ipl_type_show(struct kset *kset, char *page)
+static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
+			     char *page)
 {
 	return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
 }
 
-static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
+static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
 
-static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
+static ssize_t sys_ipl_device_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *page)
 {
 	struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
 
@@ -292,7 +279,7 @@ static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
 	}
 }
 
-static struct subsys_attribute sys_ipl_device_attr =
+static struct kobj_attribute sys_ipl_device_attr =
 	__ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
 
 static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
@@ -367,7 +354,8 @@ static struct attribute_group ipl_fcp_attr_group = {
 
 /* CCW ipl device attributes */
 
-static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
+static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *page)
 {
 	char loadparm[LOADPARM_LEN + 1] = {};
 
@@ -379,7 +367,7 @@ static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
 	return sprintf(page, "%s\n", loadparm);
 }
 
-static struct subsys_attribute sys_ipl_ccw_loadparm_attr =
+static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
 	__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
 
 static struct attribute *ipl_ccw_attrs[] = {
@@ -418,10 +406,76 @@ static struct attribute_group ipl_unknown_attr_group = {
 	.attrs = ipl_unknown_attrs,
 };
 
-static decl_subsys(ipl, NULL, NULL);
+static struct kset *ipl_kset;
+
+static int __init ipl_register_fcp_files(void)
+{
+	int rc;
+
+	rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
+	if (rc)
+		goto out;
+	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
+	if (rc)
+		goto out_ipl_parm;
+	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
+	if (!rc)
+		goto out;
+
+	sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
+
+out_ipl_parm:
+	sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
+out:
+	return rc;
+}
+
+static void ipl_run(struct shutdown_trigger *trigger)
+{
+	diag308(DIAG308_IPL, NULL);
+	if (MACHINE_IS_VM)
+		__cpcmd("IPL", NULL, 0, NULL);
+	else if (ipl_info.type == IPL_TYPE_CCW)
+		reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
+}
+
+static int ipl_init(void)
+{
+	int rc;
+
+	ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
+	if (!ipl_kset) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	switch (ipl_info.type) {
+	case IPL_TYPE_CCW:
+		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
+		break;
+	case IPL_TYPE_FCP:
+	case IPL_TYPE_FCP_DUMP:
+		rc = ipl_register_fcp_files();
+		break;
+	case IPL_TYPE_NSS:
+		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
+		break;
+	default:
+		rc = sysfs_create_group(&ipl_kset->kobj,
+					&ipl_unknown_attr_group);
+		break;
+	}
+out:
+	if (rc)
+		panic("ipl_init failed: rc = %i\n", rc);
+
+	return 0;
+}
+
+static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run,
+					    ipl_init};
 
 /*
- * reipl section
+ * reipl shutdown action: Reboot Linux on shutdown.
  */
 
 /* FCP reipl device attributes */
@@ -465,7 +519,8 @@ static void reipl_get_ascii_loadparm(char *loadparm)
 	strstrip(loadparm);
 }
 
-static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
+static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *page)
 {
 	char buf[LOADPARM_LEN + 1];
 
@@ -473,7 +528,8 @@ static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
 	return sprintf(page, "%s\n", buf);
 }
 
-static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
+static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
 					const char *buf, size_t len)
 {
 	int i, lp_len;
@@ -500,7 +556,7 @@ static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
 	return len;
 }
 
-static struct subsys_attribute sys_reipl_ccw_loadparm_attr =
+static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
 	__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
 	       reipl_ccw_loadparm_store);
 
@@ -539,7 +595,9 @@ static int reipl_set_type(enum ipl_type type)
 
 	switch(type) {
 	case IPL_TYPE_CCW:
-		if (MACHINE_IS_VM)
+		if (diag308_set_works)
+			reipl_method = REIPL_METHOD_CCW_DIAG;
+		else if (MACHINE_IS_VM)
 			reipl_method = REIPL_METHOD_CCW_VM;
 		else
 			reipl_method = REIPL_METHOD_CCW_CIO;
@@ -568,13 +626,15 @@ static int reipl_set_type(enum ipl_type type)
 	return 0;
 }
 
-static ssize_t reipl_type_show(struct kset *kset, char *page)
+static ssize_t reipl_type_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *page)
 {
 	return sprintf(page, "%s\n", ipl_type_str(reipl_type));
 }
 
-static ssize_t reipl_type_store(struct kset *kset, const char *buf,
-				size_t len)
+static ssize_t reipl_type_store(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buf, size_t len)
 {
 	int rc = -EINVAL;
 
@@ -587,140 +647,12 @@ static ssize_t reipl_type_store(struct kset *kset, const char *buf,
 	return (rc != 0) ? rc : len;
 }
 
-static struct subsys_attribute reipl_type_attr =
+static struct kobj_attribute reipl_type_attr =
 	__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
-
-static decl_subsys(reipl, NULL, NULL);
-
-/*
- * dump section
- */
-
-/* FCP dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
-		   dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
-		   dump_block_fcp->ipl_info.fcp.lun);
-DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
-		   dump_block_fcp->ipl_info.fcp.bootprog);
-DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
-		   dump_block_fcp->ipl_info.fcp.br_lba);
-DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
-		   dump_block_fcp->ipl_info.fcp.devno);
-
-static struct attribute *dump_fcp_attrs[] = {
-	&sys_dump_fcp_device_attr.attr,
-	&sys_dump_fcp_wwpn_attr.attr,
-	&sys_dump_fcp_lun_attr.attr,
-	&sys_dump_fcp_bootprog_attr.attr,
-	&sys_dump_fcp_br_lba_attr.attr,
-	NULL,
-};
-
-static struct attribute_group dump_fcp_attr_group = {
-	.name  = IPL_FCP_STR,
-	.attrs = dump_fcp_attrs,
-};
-
-/* CCW dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
-		   dump_block_ccw->ipl_info.ccw.devno);
-
-static struct attribute *dump_ccw_attrs[] = {
-	&sys_dump_ccw_device_attr.attr,
-	NULL,
-};
-
-static struct attribute_group dump_ccw_attr_group = {
-	.name  = IPL_CCW_STR,
-	.attrs = dump_ccw_attrs,
-};
-
-/* dump type */
-
-static int dump_set_type(enum dump_type type)
-{
-	if (!(dump_capabilities & type))
-		return -EINVAL;
-	switch(type) {
-	case DUMP_TYPE_CCW:
-		if (MACHINE_IS_VM)
-			dump_method = DUMP_METHOD_CCW_VM;
-		else if (diag308_set_works)
-			dump_method = DUMP_METHOD_CCW_DIAG;
-		else
-			dump_method = DUMP_METHOD_CCW_CIO;
-		break;
-	case DUMP_TYPE_FCP:
-		dump_method = DUMP_METHOD_FCP_DIAG;
-		break;
-	default:
-		dump_method = DUMP_METHOD_NONE;
-	}
-	dump_type = type;
-	return 0;
-}
-
-static ssize_t dump_type_show(struct kset *kset, char *page)
-{
-	return sprintf(page, "%s\n", dump_type_str(dump_type));
-}
-
-static ssize_t dump_type_store(struct kset *kset, const char *buf,
-			       size_t len)
-{
-	int rc = -EINVAL;
-
-	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
-		rc = dump_set_type(DUMP_TYPE_NONE);
-	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
-		rc = dump_set_type(DUMP_TYPE_CCW);
-	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
-		rc = dump_set_type(DUMP_TYPE_FCP);
-	return (rc != 0) ? rc : len;
-}
-
-static struct subsys_attribute dump_type_attr =
-	__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
-
-static decl_subsys(dump, NULL, NULL);
-
-/*
- * Shutdown actions section
- */
-
-static decl_subsys(shutdown_actions, NULL, NULL);
-
-/* on panic */
-
-static ssize_t on_panic_show(struct kset *kset, char *page)
-{
-	return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
-}
-
-static ssize_t on_panic_store(struct kset *kset, const char *buf,
-			      size_t len)
-{
-	if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
-		on_panic_action = SHUTDOWN_REIPL;
-	else if (strncmp(buf, SHUTDOWN_DUMP_STR,
-			 strlen(SHUTDOWN_DUMP_STR)) == 0)
-		on_panic_action = SHUTDOWN_DUMP;
-	else if (strncmp(buf, SHUTDOWN_STOP_STR,
-			 strlen(SHUTDOWN_STOP_STR)) == 0)
-		on_panic_action = SHUTDOWN_STOP;
-	else
-		return -EINVAL;
-
-	return len;
-}
 
-static struct subsys_attribute on_panic_attr =
-	__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+static struct kset *reipl_kset;
 
-void do_reipl(void)
+void reipl_run(struct shutdown_trigger *trigger)
 {
 	struct ccw_dev_id devid;
 	static char buf[100];
@@ -729,8 +661,6 @@ void do_reipl(void)
 	switch (reipl_method) {
 	case REIPL_METHOD_CCW_CIO:
 		devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
-		if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
-			diag308(DIAG308_IPL, NULL);
 		devid.ssid  = 0;
 		reipl_ccw_dev(&devid);
 		break;
@@ -771,98 +701,6 @@ void do_reipl(void)
 	default:
 		break;
 	}
-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-}
-
-static void do_dump(void)
-{
-	struct ccw_dev_id devid;
-	static char buf[100];
-
-	switch (dump_method) {
-	case DUMP_METHOD_CCW_CIO:
-		smp_send_stop();
-		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
-		devid.ssid  = 0;
-		reipl_ccw_dev(&devid);
-		break;
-	case DUMP_METHOD_CCW_VM:
-		smp_send_stop();
-		sprintf(buf, "STORE STATUS");
-		__cpcmd(buf, NULL, 0, NULL);
-		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
-		__cpcmd(buf, NULL, 0, NULL);
-		break;
-	case DUMP_METHOD_CCW_DIAG:
-		diag308(DIAG308_SET, dump_block_ccw);
-		diag308(DIAG308_DUMP, NULL);
-		break;
-	case DUMP_METHOD_FCP_DIAG:
-		diag308(DIAG308_SET, dump_block_fcp);
-		diag308(DIAG308_DUMP, NULL);
-		break;
-	case DUMP_METHOD_NONE:
-	default:
-		return;
-	}
-	printk(KERN_EMERG "Dump failed!\n");
-}
-
-/* init functions */
-
-static int __init ipl_register_fcp_files(void)
-{
-	int rc;
-
-	rc = sysfs_create_group(&ipl_subsys.kobj,
-				&ipl_fcp_attr_group);
-	if (rc)
-		goto out;
-	rc = sysfs_create_bin_file(&ipl_subsys.kobj,
-				   &ipl_parameter_attr);
-	if (rc)
-		goto out_ipl_parm;
-	rc = sysfs_create_bin_file(&ipl_subsys.kobj,
-				   &ipl_scp_data_attr);
-	if (!rc)
-		goto out;
-
-	sysfs_remove_bin_file(&ipl_subsys.kobj, &ipl_parameter_attr);
-
-out_ipl_parm:
-	sysfs_remove_group(&ipl_subsys.kobj, &ipl_fcp_attr_group);
-out:
-	return rc;
-}
-
-static int __init ipl_init(void)
-{
-	int rc;
-
-	rc = firmware_register(&ipl_subsys);
-	if (rc)
-		return rc;
-	switch (ipl_info.type) {
-	case IPL_TYPE_CCW:
-		rc = sysfs_create_group(&ipl_subsys.kobj,
-					&ipl_ccw_attr_group);
-		break;
-	case IPL_TYPE_FCP:
-	case IPL_TYPE_FCP_DUMP:
-		rc = ipl_register_fcp_files();
-		break;
-	case IPL_TYPE_NSS:
-		rc = sysfs_create_group(&ipl_subsys.kobj,
-					&ipl_nss_attr_group);
-		break;
-	default:
-		rc = sysfs_create_group(&ipl_subsys.kobj,
-					&ipl_unknown_attr_group);
-		break;
-	}
-	if (rc)
-		firmware_unregister(&ipl_subsys);
-	return rc;
 }
 
 static void __init reipl_probe(void)
@@ -883,7 +721,7 @@ static int __init reipl_nss_init(void)
 
 	if (!MACHINE_IS_VM)
 		return 0;
-	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_nss_attr_group);
+	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
 	if (rc)
 		return rc;
 	strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
@@ -898,7 +736,7 @@ static int __init reipl_ccw_init(void)
 	reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!reipl_block_ccw)
 		return -ENOMEM;
-	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_ccw_attr_group);
+	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group);
 	if (rc) {
 		free_page((unsigned long)reipl_block_ccw);
 		return rc;
@@ -907,6 +745,7 @@ static int __init reipl_ccw_init(void)
 	reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
 	reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
 	reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+	reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
 	/* check if read scp info worked and set loadparm */
 	if (sclp_ipl_info.is_valid)
 		memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
@@ -915,8 +754,7 @@ static int __init reipl_ccw_init(void)
 		/* read scp info failed: set empty loadparm (EBCDIC blanks) */
 		memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
 		       LOADPARM_LEN);
-	/* FIXME: check for diag308_set_works when enabling diag ccw reipl */
-	if (!MACHINE_IS_VM)
+	if (!MACHINE_IS_VM && !diag308_set_works)
 		sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
 	if (ipl_info.type == IPL_TYPE_CCW)
 		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
@@ -936,7 +774,7 @@ static int __init reipl_fcp_init(void)
 	reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!reipl_block_fcp)
 		return -ENOMEM;
-	rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_fcp_attr_group);
+	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group);
 	if (rc) {
 		free_page((unsigned long)reipl_block_fcp);
 		return rc;
@@ -954,16 +792,16 @@ static int __init reipl_fcp_init(void)
 	return 0;
 }
 
-static int __init reipl_init(void)
+static int reipl_init(void)
 {
 	int rc;
 
-	rc = firmware_register(&reipl_subsys);
-	if (rc)
-		return rc;
-	rc = subsys_create_file(&reipl_subsys, &reipl_type_attr);
+	reipl_kset = kset_create_and_add("reipl", NULL, firmware_kobj);
+	if (!reipl_kset)
+		return -ENOMEM;
+	rc = sysfs_create_file(&reipl_kset->kobj, &reipl_type_attr.attr);
 	if (rc) {
-		firmware_unregister(&reipl_subsys);
+		kset_unregister(reipl_kset);
 		return rc;
 	}
 	rc = reipl_ccw_init();
@@ -981,6 +819,140 @@ static int __init reipl_init(void)
 	return 0;
 }
 
+static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR,
+					      reipl_run, reipl_init};
+
+/*
+ * dump shutdown action: Dump Linux on shutdown.
+ */
+
+/* FCP dump device attributes */
+
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+		   dump_block_fcp->ipl_info.fcp.wwpn);
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+		   dump_block_fcp->ipl_info.fcp.lun);
+DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
+		   dump_block_fcp->ipl_info.fcp.bootprog);
+DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
+		   dump_block_fcp->ipl_info.fcp.br_lba);
+DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
+		   dump_block_fcp->ipl_info.fcp.devno);
+
+static struct attribute *dump_fcp_attrs[] = {
+	&sys_dump_fcp_device_attr.attr,
+	&sys_dump_fcp_wwpn_attr.attr,
+	&sys_dump_fcp_lun_attr.attr,
+	&sys_dump_fcp_bootprog_attr.attr,
+	&sys_dump_fcp_br_lba_attr.attr,
+	NULL,
+};
+
+static struct attribute_group dump_fcp_attr_group = {
+	.name  = IPL_FCP_STR,
+	.attrs = dump_fcp_attrs,
+};
+
+/* CCW dump device attributes */
+
+DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+		   dump_block_ccw->ipl_info.ccw.devno);
+
+static struct attribute *dump_ccw_attrs[] = {
+	&sys_dump_ccw_device_attr.attr,
+	NULL,
+};
+
+static struct attribute_group dump_ccw_attr_group = {
+	.name  = IPL_CCW_STR,
+	.attrs = dump_ccw_attrs,
+};
+
+/* dump type */
+
+static int dump_set_type(enum dump_type type)
+{
+	if (!(dump_capabilities & type))
+		return -EINVAL;
+	switch (type) {
+	case DUMP_TYPE_CCW:
+		if (diag308_set_works)
+			dump_method = DUMP_METHOD_CCW_DIAG;
+		else if (MACHINE_IS_VM)
+			dump_method = DUMP_METHOD_CCW_VM;
+		else
+			dump_method = DUMP_METHOD_CCW_CIO;
+		break;
+	case DUMP_TYPE_FCP:
+		dump_method = DUMP_METHOD_FCP_DIAG;
+		break;
+	default:
+		dump_method = DUMP_METHOD_NONE;
+	}
+	dump_type = type;
+	return 0;
+}
+
+static ssize_t dump_type_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *page)
+{
+	return sprintf(page, "%s\n", dump_type_str(dump_type));
+}
+
+static ssize_t dump_type_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buf, size_t len)
+{
+	int rc = -EINVAL;
+
+	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
+		rc = dump_set_type(DUMP_TYPE_NONE);
+	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
+		rc = dump_set_type(DUMP_TYPE_CCW);
+	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
+		rc = dump_set_type(DUMP_TYPE_FCP);
+	return (rc != 0) ? rc : len;
+}
+
+static struct kobj_attribute dump_type_attr =
+	__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
+
+static struct kset *dump_kset;
+
+static void dump_run(struct shutdown_trigger *trigger)
+{
+	struct ccw_dev_id devid;
+	static char buf[100];
+
+	switch (dump_method) {
+	case DUMP_METHOD_CCW_CIO:
+		smp_send_stop();
+		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
+		devid.ssid  = 0;
+		reipl_ccw_dev(&devid);
+		break;
+	case DUMP_METHOD_CCW_VM:
+		smp_send_stop();
+		sprintf(buf, "STORE STATUS");
+		__cpcmd(buf, NULL, 0, NULL);
+		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
+		__cpcmd(buf, NULL, 0, NULL);
+		break;
+	case DUMP_METHOD_CCW_DIAG:
+		diag308(DIAG308_SET, dump_block_ccw);
+		diag308(DIAG308_DUMP, NULL);
+		break;
+	case DUMP_METHOD_FCP_DIAG:
+		diag308(DIAG308_SET, dump_block_fcp);
+		diag308(DIAG308_DUMP, NULL);
+		break;
+	case DUMP_METHOD_NONE:
+	default:
+		return;
+	}
+	printk(KERN_EMERG "Dump failed!\n");
+}
+
 static int __init dump_ccw_init(void)
 {
 	int rc;
@@ -988,7 +960,7 @@ static int __init dump_ccw_init(void)
 	dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!dump_block_ccw)
 		return -ENOMEM;
-	rc = sysfs_create_group(&dump_subsys.kobj, &dump_ccw_attr_group);
+	rc = sysfs_create_group(&dump_kset->kobj, &dump_ccw_attr_group);
 	if (rc) {
 		free_page((unsigned long)dump_block_ccw);
 		return rc;
@@ -1012,7 +984,7 @@ static int __init dump_fcp_init(void)
 	dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!dump_block_fcp)
 		return -ENOMEM;
-	rc = sysfs_create_group(&dump_subsys.kobj, &dump_fcp_attr_group);
+	rc = sysfs_create_group(&dump_kset->kobj, &dump_fcp_attr_group);
 	if (rc) {
 		free_page((unsigned long)dump_block_fcp);
 		return rc;
@@ -1026,33 +998,16 @@ static int __init dump_fcp_init(void)
 	return 0;
 }
 
-#define SHUTDOWN_ON_PANIC_PRIO 0
-
-static int shutdown_on_panic_notify(struct notifier_block *self,
-				    unsigned long event, void *data)
-{
-	if (on_panic_action == SHUTDOWN_DUMP)
-		do_dump();
-	else if (on_panic_action == SHUTDOWN_REIPL)
-		do_reipl();
-	return NOTIFY_OK;
-}
-
-static struct notifier_block shutdown_on_panic_nb = {
-	.notifier_call = shutdown_on_panic_notify,
-	.priority = SHUTDOWN_ON_PANIC_PRIO
-};
-
-static int __init dump_init(void)
+static int dump_init(void)
 {
 	int rc;
 
-	rc = firmware_register(&dump_subsys);
-	if (rc)
-		return rc;
-	rc = subsys_create_file(&dump_subsys, &dump_type_attr);
+	dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
+	if (!dump_kset)
+		return -ENOMEM;
+	rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
 	if (rc) {
-		firmware_unregister(&dump_subsys);
+		kset_unregister(dump_kset);
 		return rc;
 	}
 	rc = dump_ccw_init();
@@ -1065,46 +1020,381 @@ static int __init dump_init(void)
1065 return 0; 1020 return 0;
1066} 1021}
1067 1022
1068static int __init shutdown_actions_init(void) 1023static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR,
1024 dump_run, dump_init};
1025
1026/*
1027 * vmcmd shutdown action: Trigger vm command on shutdown.
1028 */
1029
1030static char vmcmd_on_reboot[128];
1031static char vmcmd_on_panic[128];
1032static char vmcmd_on_halt[128];
1033static char vmcmd_on_poff[128];
1034
1035DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
1036DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
1037DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
1038DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
1039
1040static struct attribute *vmcmd_attrs[] = {
1041 &sys_vmcmd_on_reboot_attr.attr,
1042 &sys_vmcmd_on_panic_attr.attr,
1043 &sys_vmcmd_on_halt_attr.attr,
1044 &sys_vmcmd_on_poff_attr.attr,
1045 NULL,
1046};
1047
1048static struct attribute_group vmcmd_attr_group = {
1049 .attrs = vmcmd_attrs,
1050};
1051
1052static struct kset *vmcmd_kset;
1053
1054static void vmcmd_run(struct shutdown_trigger *trigger)
1055{
1056 char *cmd, *next_cmd;
1057
1058 if (strcmp(trigger->name, ON_REIPL_STR) == 0)
1059 cmd = vmcmd_on_reboot;
1060 else if (strcmp(trigger->name, ON_PANIC_STR) == 0)
1061 cmd = vmcmd_on_panic;
1062 else if (strcmp(trigger->name, ON_HALT_STR) == 0)
1063 cmd = vmcmd_on_halt;
1064 else if (strcmp(trigger->name, ON_POFF_STR) == 0)
1065 cmd = vmcmd_on_poff;
1066 else
1067 return;
1068
1069 if (strlen(cmd) == 0)
1070 return;
1071 do {
1072 next_cmd = strchr(cmd, '\n');
1073 if (next_cmd) {
1074 next_cmd[0] = 0;
1075 next_cmd += 1;
1076 }
1077 __cpcmd(cmd, NULL, 0, NULL);
1078 cmd = next_cmd;
1079 } while (cmd != NULL);
1080}
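
The command buffers may hold several CP commands separated by '\n'; vmcmd_run() above splits them in place with strchr(). A minimal userspace sketch of the same loop, with __cpcmd() replaced by a printf stub and purely illustrative commands:

#include <stdio.h>
#include <string.h>

/* stand-in for __cpcmd(): just print the CP command that would be issued */
static void cpcmd_stub(char *cmd)
{
	printf("CP> %s\n", cmd);
}

int main(void)
{
	char buf[128] = "MSG * going down\nLOGOFF";	/* illustrative */
	char *cmd = buf, *next_cmd;

	if (strlen(cmd) == 0)
		return 0;
	do {
		next_cmd = strchr(cmd, '\n');
		if (next_cmd) {
			next_cmd[0] = 0;
			next_cmd += 1;
		}
		cpcmd_stub(cmd);
		cmd = next_cmd;
	} while (cmd != NULL);
	return 0;
}
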
1081
1082static int vmcmd_init(void)
1069{ 1083{
1070 int rc; 1084 if (!MACHINE_IS_VM)
1085 return -ENOTSUPP;
1086 vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
1087 if (!vmcmd_kset)
1088 return -ENOMEM;
1089 return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group);
1090}
1071 1091
1072 rc = firmware_register(&shutdown_actions_subsys); 1092static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
1073 if (rc) 1093 vmcmd_run, vmcmd_init};
1074 return rc; 1094
1075 rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr); 1095/*
1076 if (rc) { 1096 * stop shutdown action: Stop Linux on shutdown.
1077 firmware_unregister(&shutdown_actions_subsys); 1097 */
1078 return rc; 1098
1099static void stop_run(struct shutdown_trigger *trigger)
1100{
1101 if (strcmp(trigger->name, ON_PANIC_STR) == 0)
1102 disabled_wait((unsigned long) __builtin_return_address(0));
1103 else {
1104 signal_processor(smp_processor_id(), sigp_stop);
1105 for (;;);
1079 } 1106 }
1080 atomic_notifier_chain_register(&panic_notifier_list,
1081 &shutdown_on_panic_nb);
1082 return 0;
1083} 1107}
1084 1108
1085static int __init s390_ipl_init(void) 1109static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
1110 stop_run, NULL};
1111
1112/* action list */
1113
1114static struct shutdown_action *shutdown_actions_list[] = {
1115 &ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action};
1116#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *))
1117
1118/*
1119 * Trigger section
1120 */
1121
1122static struct kset *shutdown_actions_kset;
1123
1124static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
1125 size_t len)
1086{ 1126{
1087 int rc; 1127 int i;
1128 for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
1129 if (!shutdown_actions_list[i])
1130 continue;
1131 if (strncmp(buf, shutdown_actions_list[i]->name,
1132 strlen(shutdown_actions_list[i]->name)) == 0) {
1133 trigger->action = shutdown_actions_list[i];
1134 return len;
1135 }
1136 }
1137 return -EINVAL;
1138}
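
set_trigger() above compares only strlen(name) characters, so the trailing newline that `echo` appends to a sysfs write still matches; by the same token, any buffer that merely starts with an action name is accepted. A userspace sketch of the match (the name strings are presumed values of the SHUTDOWN_ACTION_*_STR constants, which are defined outside this hunk):

#include <stdio.h>
#include <string.h>

static const char *names[] = { "ipl", "reipl", "dump", "vmcmd", "stop" };

int main(void)
{
	const char *buf = "vmcmd\n";	/* what `echo vmcmd > on_panic` delivers */
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (strncmp(buf, names[i], strlen(names[i])) == 0) {
			printf("matched action: %s\n", names[i]);
			return 0;
		}
	}
	printf("no match: -EINVAL\n");
	return 0;
}
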
1088 1139
1089 sclp_get_ipl_info(&sclp_ipl_info); 1140/* on reipl */
1141
1142static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
1143 &reipl_action};
1144
1145static ssize_t on_reboot_show(struct kobject *kobj,
1146 struct kobj_attribute *attr, char *page)
1147{
1148 return sprintf(page, "%s\n", on_reboot_trigger.action->name);
1149}
1150
1151static ssize_t on_reboot_store(struct kobject *kobj,
1152 struct kobj_attribute *attr,
1153 const char *buf, size_t len)
1154{
1155 return set_trigger(buf, &on_reboot_trigger, len);
1156}
1157
1158static struct kobj_attribute on_reboot_attr =
1159 __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
1160
1161static void do_machine_restart(char *__unused)
1162{
1163 smp_send_stop();
1164 on_reboot_trigger.action->fn(&on_reboot_trigger);
1165 reipl_run(NULL);
1166}
1167void (*_machine_restart)(char *command) = do_machine_restart;
1168
1169/* on panic */
1170
1171static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
1172
1173static ssize_t on_panic_show(struct kobject *kobj,
1174 struct kobj_attribute *attr, char *page)
1175{
1176 return sprintf(page, "%s\n", on_panic_trigger.action->name);
1177}
1178
1179static ssize_t on_panic_store(struct kobject *kobj,
1180 struct kobj_attribute *attr,
1181 const char *buf, size_t len)
1182{
1183 return set_trigger(buf, &on_panic_trigger, len);
1184}
1185
1186static struct kobj_attribute on_panic_attr =
1187 __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
1188
1189static void do_panic(void)
1190{
1191 on_panic_trigger.action->fn(&on_panic_trigger);
1192 stop_run(&on_panic_trigger);
1193}
1194
1195/* on halt */
1196
1197static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
1198
1199static ssize_t on_halt_show(struct kobject *kobj,
1200 struct kobj_attribute *attr, char *page)
1201{
1202 return sprintf(page, "%s\n", on_halt_trigger.action->name);
1203}
1204
1205static ssize_t on_halt_store(struct kobject *kobj,
1206 struct kobj_attribute *attr,
1207 const char *buf, size_t len)
1208{
1209 return set_trigger(buf, &on_halt_trigger, len);
1210}
1211
1212static struct kobj_attribute on_halt_attr =
1213 __ATTR(on_halt, 0644, on_halt_show, on_halt_store);
1214
1215
1216static void do_machine_halt(void)
1217{
1218 smp_send_stop();
1219 on_halt_trigger.action->fn(&on_halt_trigger);
1220 stop_run(&on_halt_trigger);
1221}
1222void (*_machine_halt)(void) = do_machine_halt;
1223
1224/* on power off */
1225
1226static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
1227
1228static ssize_t on_poff_show(struct kobject *kobj,
1229 struct kobj_attribute *attr, char *page)
1230{
1231 return sprintf(page, "%s\n", on_poff_trigger.action->name);
1232}
1233
1234static ssize_t on_poff_store(struct kobject *kobj,
1235 struct kobj_attribute *attr,
1236 const char *buf, size_t len)
1237{
1238 return set_trigger(buf, &on_poff_trigger, len);
1239}
1240
1241static struct kobj_attribute on_poff_attr =
1242 __ATTR(on_poff, 0644, on_poff_show, on_poff_store);
1243
1244
1245static void do_machine_power_off(void)
1246{
1247 smp_send_stop();
1248 on_poff_trigger.action->fn(&on_poff_trigger);
1249 stop_run(&on_poff_trigger);
1250}
1251void (*_machine_power_off)(void) = do_machine_power_off;
1252
1253static void __init shutdown_triggers_init(void)
1254{
1255 shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
1256 firmware_kobj);
1257 if (!shutdown_actions_kset)
1258 goto fail;
1259 if (sysfs_create_file(&shutdown_actions_kset->kobj,
1260 &on_reboot_attr.attr))
1261 goto fail;
1262 if (sysfs_create_file(&shutdown_actions_kset->kobj,
1263 &on_panic_attr.attr))
1264 goto fail;
1265 if (sysfs_create_file(&shutdown_actions_kset->kobj,
1266 &on_halt_attr.attr))
1267 goto fail;
1268 if (sysfs_create_file(&shutdown_actions_kset->kobj,
1269 &on_poff_attr.attr))
1270 goto fail;
1271
1272 return;
1273fail:
1274 panic("shutdown_triggers_init failed\n");
1275}
1276
1277static void __init shutdown_actions_init(void)
1278{
1279 int i;
1280
1281 for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
1282 if (!shutdown_actions_list[i]->init)
1283 continue;
1284 if (shutdown_actions_list[i]->init())
1285 shutdown_actions_list[i] = NULL;
1286 }
1287}
1288
1289static int __init s390_ipl_init(void)
1290{
1090 reipl_probe(); 1291 reipl_probe();
1091 rc = ipl_init(); 1292 sclp_get_ipl_info(&sclp_ipl_info);
1092 if (rc) 1293 shutdown_actions_init();
1093 return rc; 1294 shutdown_triggers_init();
1094 rc = reipl_init();
1095 if (rc)
1096 return rc;
1097 rc = dump_init();
1098 if (rc)
1099 return rc;
1100 rc = shutdown_actions_init();
1101 if (rc)
1102 return rc;
1103 return 0; 1295 return 0;
1104} 1296}
1105 1297
1106__initcall(s390_ipl_init); 1298__initcall(s390_ipl_init);
1107 1299
1300static void __init strncpy_skip_quote(char *dst, char *src, int n)
1301{
1302 int sx, dx;
1303
1304 dx = 0;
1305 for (sx = 0; src[sx] != 0; sx++) {
1306 if (src[sx] == '"')
1307 continue;
1308 dst[dx++] = src[sx];
1309 if (dx >= n)
1310 break;
1311 }
1312}
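
strncpy_skip_quote() feeds the vmcmd buffers from boot parameters such as vmpanic="MSG * going down", dropping the double quotes as it copies. Note that it does not NUL-terminate dst itself; the setup functions below rely on the static buffers being zeroed and additionally force buf[127] = 0. A small test harness (userspace; the buffer is zero-initialized for the same reason):

#include <stdio.h>

static void strncpy_skip_quote(char *dst, char *src, int n)
{
	int sx, dx;

	dx = 0;
	for (sx = 0; src[sx] != 0; sx++) {
		if (src[sx] == '"')
			continue;
		dst[dx++] = src[sx];
		if (dx >= n)
			break;
	}
}

int main(void)
{
	char buf[128] = { 0 };	/* zeroed, like the static kernel buffers */

	strncpy_skip_quote(buf, "\"MSG * going down\"", 127);
	printf("%s\n", buf);	/* prints: MSG * going down */
	return 0;
}
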
1313
1314static int __init vmcmd_on_reboot_setup(char *str)
1315{
1316 if (!MACHINE_IS_VM)
1317 return 1;
1318 strncpy_skip_quote(vmcmd_on_reboot, str, 127);
1319 vmcmd_on_reboot[127] = 0;
1320 on_reboot_trigger.action = &vmcmd_action;
1321 return 1;
1322}
1323__setup("vmreboot=", vmcmd_on_reboot_setup);
1324
1325static int __init vmcmd_on_panic_setup(char *str)
1326{
1327 if (!MACHINE_IS_VM)
1328 return 1;
1329 strncpy_skip_quote(vmcmd_on_panic, str, 127);
1330 vmcmd_on_panic[127] = 0;
1331 on_panic_trigger.action = &vmcmd_action;
1332 return 1;
1333}
1334__setup("vmpanic=", vmcmd_on_panic_setup);
1335
1336static int __init vmcmd_on_halt_setup(char *str)
1337{
1338 if (!MACHINE_IS_VM)
1339 return 1;
1340 strncpy_skip_quote(vmcmd_on_halt, str, 127);
1341 vmcmd_on_halt[127] = 0;
1342 on_halt_trigger.action = &vmcmd_action;
1343 return 1;
1344}
1345__setup("vmhalt=", vmcmd_on_halt_setup);
1346
1347static int __init vmcmd_on_poff_setup(char *str)
1348{
1349 if (!MACHINE_IS_VM)
1350 return 1;
1351 strncpy_skip_quote(vmcmd_on_poff, str, 127);
1352 vmcmd_on_poff[127] = 0;
1353 on_poff_trigger.action = &vmcmd_action;
1354 return 1;
1355}
1356__setup("vmpoff=", vmcmd_on_poff_setup);
1357
1358static int on_panic_notify(struct notifier_block *self,
1359 unsigned long event, void *data)
1360{
1361 do_panic();
1362 return NOTIFY_OK;
1363}
1364
1365static struct notifier_block on_panic_nb = {
1366 .notifier_call = on_panic_notify,
1367 .priority = 0,
1368};
1369
1370void __init setup_ipl(void)
1371{
1372 ipl_info.type = get_ipl_type();
1373 switch (ipl_info.type) {
1374 case IPL_TYPE_CCW:
1375 ipl_info.data.ccw.dev_id.devno = ipl_devno;
1376 ipl_info.data.ccw.dev_id.ssid = 0;
1377 break;
1378 case IPL_TYPE_FCP:
1379 case IPL_TYPE_FCP_DUMP:
1380 ipl_info.data.fcp.dev_id.devno =
1381 IPL_PARMBLOCK_START->ipl_info.fcp.devno;
1382 ipl_info.data.fcp.dev_id.ssid = 0;
1383 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
1384 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
1385 break;
1386 case IPL_TYPE_NSS:
1387 strncpy(ipl_info.data.nss.name, kernel_nss_name,
1388 sizeof(ipl_info.data.nss.name));
1389 break;
1390 case IPL_TYPE_UNKNOWN:
1391 default:
1392 /* We have no info to copy */
1393 break;
1394 }
1395 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
1396}
1397
1108void __init ipl_save_parameters(void) 1398void __init ipl_save_parameters(void)
1109{ 1399{
1110 struct cio_iplinfo iplinfo; 1400 struct cio_iplinfo iplinfo;
@@ -1185,3 +1475,4 @@ void s390_reset_system(void)
1185 1475
1186 do_reset_calls(); 1476 do_reset_calls();
1187} 1477}
1478
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 29f7884b4ffa..0e7aca039307 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -36,7 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/notifier.h> 38#include <linux/notifier.h>
39 39#include <linux/utsname.h>
40#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <asm/pgtable.h> 41#include <asm/pgtable.h>
42#include <asm/system.h> 42#include <asm/system.h>
@@ -182,13 +182,15 @@ void cpu_idle(void)
182 182
183void show_regs(struct pt_regs *regs) 183void show_regs(struct pt_regs *regs)
184{ 184{
185 struct task_struct *tsk = current; 185 print_modules();
186 186 printk("CPU: %d %s %s %.*s\n",
187 printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted()); 187 task_thread_info(current)->cpu, print_tainted(),
188 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 188 init_utsname()->release,
189 current->comm, task_pid_nr(current), (void *) tsk, 189 (int)strcspn(init_utsname()->version, " "),
190 (void *) tsk->thread.ksp); 190 init_utsname()->version);
191 191 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
192 current->comm, current->pid, current,
193 (void *) current->thread.ksp);
192 show_registers(regs); 194 show_registers(regs);
193 /* Show stack backtrace if pt_regs is from kernel mode */ 195 /* Show stack backtrace if pt_regs is from kernel mode */
194 if (!(regs->psw.mask & PSW_MASK_PSTATE)) 196 if (!(regs->psw.mask & PSW_MASK_PSTATE))
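
The `%.*s` plus strcspn() idiom in the new show_regs() prints only the first whitespace-delimited word of the uts version string, i.e. the build number. For example (userspace; the strings are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *version = "#3 SMP Mon Jan 28 12:00:00 CET 2008";

	/* strcspn() returns the length of the leading non-space run,
	 * so only "#3" is printed */
	printf("CPU: 0 Not tainted 2.6.24 %.*s\n",
	       (int)strcspn(version, " "), version);
	return 0;
}
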
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 1d81bf9488ae..6e036bae9875 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -86,13 +86,13 @@ FixPerRegisters(struct task_struct *task)
86 per_info->control_regs.bits.storage_alt_space_ctl = 0; 86 per_info->control_regs.bits.storage_alt_space_ctl = 0;
87} 87}
88 88
89static void set_single_step(struct task_struct *task) 89void user_enable_single_step(struct task_struct *task)
90{ 90{
91 task->thread.per_info.single_step = 1; 91 task->thread.per_info.single_step = 1;
92 FixPerRegisters(task); 92 FixPerRegisters(task);
93} 93}
94 94
95static void clear_single_step(struct task_struct *task) 95void user_disable_single_step(struct task_struct *task)
96{ 96{
97 task->thread.per_info.single_step = 0; 97 task->thread.per_info.single_step = 0;
98 FixPerRegisters(task); 98 FixPerRegisters(task);
@@ -107,7 +107,7 @@ void
107ptrace_disable(struct task_struct *child) 107ptrace_disable(struct task_struct *child)
108{ 108{
109 /* make sure the single step bit is not set. */ 109 /* make sure the single step bit is not set. */
110 clear_single_step(child); 110 user_disable_single_step(child);
111} 111}
112 112
113#ifndef CONFIG_64BIT 113#ifndef CONFIG_64BIT
@@ -651,7 +651,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
651 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 651 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
652 child->exit_code = data; 652 child->exit_code = data;
653 /* make sure the single step bit is not set. */ 653 /* make sure the single step bit is not set. */
654 clear_single_step(child); 654 user_disable_single_step(child);
655 wake_up_process(child); 655 wake_up_process(child);
656 return 0; 656 return 0;
657 657
@@ -665,7 +665,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
665 return 0; 665 return 0;
666 child->exit_code = SIGKILL; 666 child->exit_code = SIGKILL;
667 /* make sure the single step bit is not set. */ 667 /* make sure the single step bit is not set. */
668 clear_single_step(child); 668 user_disable_single_step(child);
669 wake_up_process(child); 669 wake_up_process(child);
670 return 0; 670 return 0;
671 671
@@ -675,10 +675,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
675 return -EIO; 675 return -EIO;
676 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 676 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
677 child->exit_code = data; 677 child->exit_code = data;
678 if (data) 678 user_enable_single_step(child);
679 set_tsk_thread_flag(child, TIF_SINGLE_STEP);
680 else
681 set_single_step(child);
682 /* give it a chance to run. */ 679 /* give it a chance to run. */
683 wake_up_process(child); 680 wake_up_process(child);
684 return 0; 681 return 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 577aa7dd660e..766c783bd7a7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -126,75 +126,6 @@ void __cpuinit cpu_init(void)
126} 126}
127 127
128/* 128/*
129 * VM halt and poweroff setup routines
130 */
131char vmhalt_cmd[128] = "";
132char vmpoff_cmd[128] = "";
133static char vmpanic_cmd[128] = "";
134
135static void strncpy_skip_quote(char *dst, char *src, int n)
136{
137 int sx, dx;
138
139 dx = 0;
140 for (sx = 0; src[sx] != 0; sx++) {
141 if (src[sx] == '"') continue;
142 dst[dx++] = src[sx];
143 if (dx >= n) break;
144 }
145}
146
147static int __init vmhalt_setup(char *str)
148{
149 strncpy_skip_quote(vmhalt_cmd, str, 127);
150 vmhalt_cmd[127] = 0;
151 return 1;
152}
153
154__setup("vmhalt=", vmhalt_setup);
155
156static int __init vmpoff_setup(char *str)
157{
158 strncpy_skip_quote(vmpoff_cmd, str, 127);
159 vmpoff_cmd[127] = 0;
160 return 1;
161}
162
163__setup("vmpoff=", vmpoff_setup);
164
165static int vmpanic_notify(struct notifier_block *self, unsigned long event,
166 void *data)
167{
168 if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
169 cpcmd(vmpanic_cmd, NULL, 0, NULL);
170
171 return NOTIFY_OK;
172}
173
174#define PANIC_PRI_VMPANIC 0
175
176static struct notifier_block vmpanic_nb = {
177 .notifier_call = vmpanic_notify,
178 .priority = PANIC_PRI_VMPANIC
179};
180
181static int __init vmpanic_setup(char *str)
182{
183 static int register_done __initdata = 0;
184
185 strncpy_skip_quote(vmpanic_cmd, str, 127);
186 vmpanic_cmd[127] = 0;
187 if (!register_done) {
188 register_done = 1;
189 atomic_notifier_chain_register(&panic_notifier_list,
190 &vmpanic_nb);
191 }
192 return 1;
193}
194
195__setup("vmpanic=", vmpanic_setup);
196
197/*
198 * condev= and conmode= setup parameter. 129 * condev= and conmode= setup parameter.
199 */ 130 */
200 131
@@ -308,38 +239,6 @@ static void __init setup_zfcpdump(unsigned int console_devno)
308static inline void setup_zfcpdump(unsigned int console_devno) {} 239static inline void setup_zfcpdump(unsigned int console_devno) {}
309#endif /* CONFIG_ZFCPDUMP */ 240#endif /* CONFIG_ZFCPDUMP */
310 241
311#ifdef CONFIG_SMP
312void (*_machine_restart)(char *command) = machine_restart_smp;
313void (*_machine_halt)(void) = machine_halt_smp;
314void (*_machine_power_off)(void) = machine_power_off_smp;
315#else
316/*
317 * Reboot, halt and power_off routines for non SMP.
318 */
319static void do_machine_restart_nonsmp(char * __unused)
320{
321 do_reipl();
322}
323
324static void do_machine_halt_nonsmp(void)
325{
326 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
327 __cpcmd(vmhalt_cmd, NULL, 0, NULL);
328 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
329}
330
331static void do_machine_power_off_nonsmp(void)
332{
333 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
334 __cpcmd(vmpoff_cmd, NULL, 0, NULL);
335 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
336}
337
338void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
339void (*_machine_halt)(void) = do_machine_halt_nonsmp;
340void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
341#endif
342
343 /* 242 /*
344 * Reboot, halt and power_off stubs. They just call _machine_restart, 243 * Reboot, halt and power_off stubs. They just call _machine_restart,
345 * _machine_halt or _machine_power_off. 244 * _machine_halt or _machine_power_off.
@@ -559,7 +458,9 @@ setup_resources(void)
559 data_resource.start = (unsigned long) &_etext; 458 data_resource.start = (unsigned long) &_etext;
560 data_resource.end = (unsigned long) &_edata - 1; 459 data_resource.end = (unsigned long) &_edata - 1;
561 460
562 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 461 for (i = 0; i < MEMORY_CHUNKS; i++) {
462 if (!memory_chunk[i].size)
463 continue;
563 res = alloc_bootmem_low(sizeof(struct resource)); 464 res = alloc_bootmem_low(sizeof(struct resource));
564 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 465 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
565 switch (memory_chunk[i].type) { 466 switch (memory_chunk[i].type) {
@@ -617,7 +518,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
617static void __init setup_memory_end(void) 518static void __init setup_memory_end(void)
618{ 519{
619 unsigned long memory_size; 520 unsigned long memory_size;
620 unsigned long max_mem, max_phys; 521 unsigned long max_mem;
621 int i; 522 int i;
622 523
623#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 524#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
@@ -625,10 +526,31 @@ static void __init setup_memory_end(void)
625 memory_end = ZFCPDUMP_HSA_SIZE; 526 memory_end = ZFCPDUMP_HSA_SIZE;
626#endif 527#endif
627 memory_size = 0; 528 memory_size = 0;
628 max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
629 memory_end &= PAGE_MASK; 529 memory_end &= PAGE_MASK;
630 530
631 max_mem = memory_end ? min(max_phys, memory_end) : max_phys; 531 max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
532 memory_end = min(max_mem, memory_end);
533
534 /*
535 * Make sure all chunks are MAX_ORDER aligned so we don't need the
536 * extra checks that HOLES_IN_ZONE would require.
537 */
538 for (i = 0; i < MEMORY_CHUNKS; i++) {
539 unsigned long start, end;
540 struct mem_chunk *chunk;
541 unsigned long align;
542
543 chunk = &memory_chunk[i];
544 align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
545 start = (chunk->addr + align - 1) & ~(align - 1);
546 end = (chunk->addr + chunk->size) & ~(align - 1);
547 if (start >= end)
548 memset(chunk, 0, sizeof(*chunk));
549 else {
550 chunk->addr = start;
551 chunk->size = end - start;
552 }
553 }
632 554
633 for (i = 0; i < MEMORY_CHUNKS; i++) { 555 for (i = 0; i < MEMORY_CHUNKS; i++) {
634 struct mem_chunk *chunk = &memory_chunk[i]; 556 struct mem_chunk *chunk = &memory_chunk[i];
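
The rounding in the new chunk loop keeps only the largest MAX_ORDER-aligned range that each chunk fully contains, so no zone ends up with partial MAX_ORDER blocks. Assuming MAX_ORDER = 11 and PAGE_SHIFT = 12 (the usual values of the era), align comes to 4 MiB; the arithmetic, worked through in userspace:

#include <stdio.h>

int main(void)
{
	unsigned long align = 1UL << (11 + 12 - 1);	/* assumed MAX_ORDER=11: 4 MiB */
	unsigned long addr = 0x00300000, size = 0x00900000;
	unsigned long start = (addr + align - 1) & ~(align - 1);
	unsigned long end = (addr + size) & ~(align - 1);

	/* start rounds up to 0x400000, end rounds down to 0xc00000; a chunk
	 * whose rounded start meets or passes its rounded end gets zeroed */
	printf("start=%#lx end=%#lx\n", start, end);
	return 0;
}
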
@@ -890,7 +812,7 @@ setup_arch(char **cmdline_p)
890 812
891 parse_early_param(); 813 parse_early_param();
892 814
893 setup_ipl_info(); 815 setup_ipl();
894 setup_memory_end(); 816 setup_memory_end();
895 setup_addressing_mode(); 817 setup_addressing_mode();
896 setup_memory(); 818 setup_memory();
@@ -899,7 +821,6 @@ setup_arch(char **cmdline_p)
899 821
900 cpu_init(); 822 cpu_init();
901 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; 823 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
902 smp_setup_cpu_possible_map();
903 824
904 /* 825 /*
905 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). 826 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
@@ -920,7 +841,7 @@ setup_arch(char **cmdline_p)
920 841
921void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) 842void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
922{ 843{
923 printk("cpu %d " 844 printk(KERN_INFO "cpu %d "
924#ifdef CONFIG_SMP 845#ifdef CONFIG_SMP
925 "phys_idx=%d " 846 "phys_idx=%d "
926#endif 847#endif
@@ -996,7 +917,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
996static void c_stop(struct seq_file *m, void *v) 917static void c_stop(struct seq_file *m, void *v)
997{ 918{
998} 919}
999struct seq_operations cpuinfo_op = { 920const struct seq_operations cpuinfo_op = {
1000 .start = c_start, 921 .start = c_start,
1001 .next = c_next, 922 .next = c_next,
1002 .stop = c_stop, 923 .stop = c_stop,
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d264671c1b71..4449bf32cbf1 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -471,6 +471,7 @@ void do_signal(struct pt_regs *regs)
471 471
472 if (signr > 0) { 472 if (signr > 0) {
473 /* Whee! Actually deliver the signal. */ 473 /* Whee! Actually deliver the signal. */
474 int ret;
474#ifdef CONFIG_COMPAT 475#ifdef CONFIG_COMPAT
475 if (test_thread_flag(TIF_31BIT)) { 476 if (test_thread_flag(TIF_31BIT)) {
476 extern int handle_signal32(unsigned long sig, 477 extern int handle_signal32(unsigned long sig,
@@ -478,15 +479,12 @@ void do_signal(struct pt_regs *regs)
478 siginfo_t *info, 479 siginfo_t *info,
479 sigset_t *oldset, 480 sigset_t *oldset,
480 struct pt_regs *regs); 481 struct pt_regs *regs);
481 if (handle_signal32( 482 ret = handle_signal32(signr, &ka, &info, oldset, regs);
482 signr, &ka, &info, oldset, regs) == 0) {
483 if (test_thread_flag(TIF_RESTORE_SIGMASK))
484 clear_thread_flag(TIF_RESTORE_SIGMASK);
485 }
486 return;
487 } 483 }
484 else
488#endif 485#endif
489 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { 486 ret = handle_signal(signr, &ka, &info, oldset, regs);
487 if (!ret) {
490 /* 488 /*
491 * A signal was successfully delivered; the saved 489 * A signal was successfully delivered; the saved
492 * sigmask will have been stored in the signal frame, 490 * sigmask will have been stored in the signal frame,
@@ -495,6 +493,14 @@ void do_signal(struct pt_regs *regs)
495 */ 493 */
496 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 494 if (test_thread_flag(TIF_RESTORE_SIGMASK))
497 clear_thread_flag(TIF_RESTORE_SIGMASK); 495 clear_thread_flag(TIF_RESTORE_SIGMASK);
496
497 /*
498 * If we would have taken a single-step trap
499 * for a normal instruction, act like we took
500 * one for the handler setup.
501 */
502 if (current->thread.per_info.single_step)
503 set_thread_flag(TIF_SINGLE_STEP);
498 } 504 }
499 return; 505 return;
500 } 506 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 264ea906db4c..aa37fa154512 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,6 +42,7 @@
42#include <asm/tlbflush.h> 42#include <asm/tlbflush.h>
43#include <asm/timer.h> 43#include <asm/timer.h>
44#include <asm/lowcore.h> 44#include <asm/lowcore.h>
45#include <asm/sclp.h>
45#include <asm/cpu.h> 46#include <asm/cpu.h>
46 47
47/* 48/*
@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr);
53cpumask_t cpu_online_map = CPU_MASK_NONE; 54cpumask_t cpu_online_map = CPU_MASK_NONE;
54EXPORT_SYMBOL(cpu_online_map); 55EXPORT_SYMBOL(cpu_online_map);
55 56
56cpumask_t cpu_possible_map = CPU_MASK_NONE; 57cpumask_t cpu_possible_map = CPU_MASK_ALL;
57EXPORT_SYMBOL(cpu_possible_map); 58EXPORT_SYMBOL(cpu_possible_map);
58 59
59static struct task_struct *current_set[NR_CPUS]; 60static struct task_struct *current_set[NR_CPUS];
60 61
62static u8 smp_cpu_type;
63static int smp_use_sigp_detection;
64
65enum s390_cpu_state {
66 CPU_STATE_STANDBY,
67 CPU_STATE_CONFIGURED,
68};
69
70#ifdef CONFIG_HOTPLUG_CPU
71static DEFINE_MUTEX(smp_cpu_state_mutex);
72#endif
73static int smp_cpu_state[NR_CPUS];
74
75static DEFINE_PER_CPU(struct cpu, cpu_devices);
76DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
77
61static void smp_ext_bitcall(int, ec_bit_sig); 78static void smp_ext_bitcall(int, ec_bit_sig);
62 79
63/* 80/*
@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
193} 210}
194EXPORT_SYMBOL(smp_call_function_single); 211EXPORT_SYMBOL(smp_call_function_single);
195 212
213/**
214 * smp_call_function_mask(): Run a function on a set of other CPUs.
215 * @mask: The set of cpus to run on. Must not include the current cpu.
216 * @func: The function to run. This must be fast and non-blocking.
217 * @info: An arbitrary pointer to pass to the function.
218 * @wait: If true, wait (atomically) until function has completed on other CPUs.
219 *
220 * Returns 0 on success, else a negative status code.
221 *
 222 * If @wait is true, the call returns once @func has completed on all
 223 * cpus in @mask; otherwise it returns before the remote cpus run @func.
224 *
225 * You must not call this function with disabled interrupts or from a
226 * hardware interrupt handler or from a bottom half handler.
227 */
228int
229smp_call_function_mask(cpumask_t mask,
230 void (*func)(void *), void *info,
231 int wait)
232{
233 preempt_disable();
234 __smp_call_function_map(func, info, 0, wait, mask);
235 preempt_enable();
236 return 0;
237}
238EXPORT_SYMBOL(smp_call_function_mask);
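
A hedged usage sketch for the new helper (kernel context; do_flush() and its purpose are invented for illustration, the cpumask calls are the era's API):

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static void do_flush(void *info)	/* hypothetical per-cpu worker */
{
	/* runs on every cpu in the mask; must be fast and non-blocking */
}

static void flush_other_cpus(void)
{
	cpumask_t mask;

	preempt_disable();	/* pin this cpu while building the mask */
	mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);	/* mask must exclude us */
	preempt_enable();
	smp_call_function_mask(mask, do_flush, NULL, 1);	/* 1 == wait */
}
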
239
196void smp_send_stop(void) 240void smp_send_stop(void)
197{ 241{
198 int cpu, rc; 242 int cpu, rc;
@@ -217,33 +261,6 @@ void smp_send_stop(void)
217} 261}
218 262
219/* 263/*
220 * Reboot, halt and power_off routines for SMP.
221 */
222void machine_restart_smp(char *__unused)
223{
224 smp_send_stop();
225 do_reipl();
226}
227
228void machine_halt_smp(void)
229{
230 smp_send_stop();
231 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
232 __cpcmd(vmhalt_cmd, NULL, 0, NULL);
233 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
234 for (;;);
235}
236
237void machine_power_off_smp(void)
238{
239 smp_send_stop();
240 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
241 __cpcmd(vmpoff_cmd, NULL, 0, NULL);
242 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
243 for (;;);
244}
245
246/*
247 * This is the main routine where commands issued by other 264 * This is the main routine where commands issued by other
248 * cpus are handled. 265 * cpus are handled.
249 */ 266 */
@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
355} 372}
356EXPORT_SYMBOL(smp_ctl_clear_bit); 373EXPORT_SYMBOL(smp_ctl_clear_bit);
357 374
375/*
 376 * In the early ipl state a temporary logical cpu number is needed, so
 377 * that the sigp functions can be used to sense other cpus. Since NR_CPUS
 378 * is >= 2 on CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
379 */
380#define CPU_INIT_NO 1
381
358#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 382#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
359 383
360/* 384/*
@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
375 "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); 399 "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
376 return; 400 return;
377 } 401 }
378 zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area)); 402 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
379 __cpu_logical_map[1] = (__u16) phy_cpu; 403 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
380 while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy) 404 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
405 sigp_busy)
381 cpu_relax(); 406 cpu_relax();
382 memcpy(zfcpdump_save_areas[cpu], 407 memcpy(zfcpdump_save_areas[cpu],
383 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 408 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
@@ -397,32 +422,155 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
397 422
398#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */ 423#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
399 424
400/* 425static int cpu_stopped(int cpu)
401 * Lets check how many CPUs we have.
402 */
403static unsigned int __init smp_count_cpus(void)
404{ 426{
405 unsigned int cpu, num_cpus; 427 __u32 status;
406 __u16 boot_cpu_addr;
407 428
408 /* 429 /* Check for stopped state */
409 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu. 430 if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
410 */ 431 sigp_status_stored) {
432 if (status & 0x40)
433 return 1;
434 }
435 return 0;
436}
437
438static int cpu_known(int cpu_id)
439{
440 int cpu;
441
442 for_each_present_cpu(cpu) {
443 if (__cpu_logical_map[cpu] == cpu_id)
444 return 1;
445 }
446 return 0;
447}
448
449static int smp_rescan_cpus_sigp(cpumask_t avail)
450{
451 int cpu_id, logical_cpu;
452
453 logical_cpu = first_cpu(avail);
454 if (logical_cpu == NR_CPUS)
455 return 0;
456 for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
457 if (cpu_known(cpu_id))
458 continue;
459 __cpu_logical_map[logical_cpu] = cpu_id;
460 if (!cpu_stopped(logical_cpu))
461 continue;
462 cpu_set(logical_cpu, cpu_present_map);
463 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
464 logical_cpu = next_cpu(logical_cpu, avail);
465 if (logical_cpu == NR_CPUS)
466 break;
467 }
468 return 0;
469}
470
471static int smp_rescan_cpus_sclp(cpumask_t avail)
472{
473 struct sclp_cpu_info *info;
474 int cpu_id, logical_cpu, cpu;
475 int rc;
476
477 logical_cpu = first_cpu(avail);
478 if (logical_cpu == NR_CPUS)
479 return 0;
480 info = kmalloc(sizeof(*info), GFP_KERNEL);
481 if (!info)
482 return -ENOMEM;
483 rc = sclp_get_cpu_info(info);
484 if (rc)
485 goto out;
486 for (cpu = 0; cpu < info->combined; cpu++) {
487 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
488 continue;
489 cpu_id = info->cpu[cpu].address;
490 if (cpu_known(cpu_id))
491 continue;
492 __cpu_logical_map[logical_cpu] = cpu_id;
493 cpu_set(logical_cpu, cpu_present_map);
494 if (cpu >= info->configured)
495 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
496 else
497 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
498 logical_cpu = next_cpu(logical_cpu, avail);
499 if (logical_cpu == NR_CPUS)
500 break;
501 }
502out:
503 kfree(info);
504 return rc;
505}
506
507static int smp_rescan_cpus(void)
508{
509 cpumask_t avail;
510
511 cpus_xor(avail, cpu_possible_map, cpu_present_map);
512 if (smp_use_sigp_detection)
513 return smp_rescan_cpus_sigp(avail);
514 else
515 return smp_rescan_cpus_sclp(avail);
516}
517
518static void __init smp_detect_cpus(void)
519{
520 unsigned int cpu, c_cpus, s_cpus;
521 struct sclp_cpu_info *info;
522 u16 boot_cpu_addr, cpu_addr;
523
524 c_cpus = 1;
525 s_cpus = 0;
411 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; 526 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
412 current_thread_info()->cpu = 0; 527 info = kmalloc(sizeof(*info), GFP_KERNEL);
413 num_cpus = 1; 528 if (!info)
414 for (cpu = 0; cpu <= 65535; cpu++) { 529 panic("smp_detect_cpus failed to allocate memory\n");
415 if ((__u16) cpu == boot_cpu_addr) 530 /* Use sigp detection algorithm if sclp doesn't work. */
531 if (sclp_get_cpu_info(info)) {
532 smp_use_sigp_detection = 1;
533 for (cpu = 0; cpu <= 65535; cpu++) {
534 if (cpu == boot_cpu_addr)
535 continue;
536 __cpu_logical_map[CPU_INIT_NO] = cpu;
537 if (!cpu_stopped(CPU_INIT_NO))
538 continue;
539 smp_get_save_area(c_cpus, cpu);
540 c_cpus++;
541 }
542 goto out;
543 }
544
545 if (info->has_cpu_type) {
546 for (cpu = 0; cpu < info->combined; cpu++) {
547 if (info->cpu[cpu].address == boot_cpu_addr) {
548 smp_cpu_type = info->cpu[cpu].type;
549 break;
550 }
551 }
552 }
553
554 for (cpu = 0; cpu < info->combined; cpu++) {
555 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
556 continue;
557 cpu_addr = info->cpu[cpu].address;
558 if (cpu_addr == boot_cpu_addr)
416 continue; 559 continue;
417 __cpu_logical_map[1] = (__u16) cpu; 560 __cpu_logical_map[CPU_INIT_NO] = cpu_addr;
418 if (signal_processor(1, sigp_sense) == sigp_not_operational) 561 if (!cpu_stopped(CPU_INIT_NO)) {
562 s_cpus++;
419 continue; 563 continue;
420 smp_get_save_area(num_cpus, cpu); 564 }
421 num_cpus++; 565 smp_get_save_area(c_cpus, cpu_addr);
566 c_cpus++;
422 } 567 }
423 printk("Detected %d CPU's\n", (int) num_cpus); 568out:
424 printk("Boot cpu address %2X\n", boot_cpu_addr); 569 kfree(info);
425 return num_cpus; 570 printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
571 get_online_cpus();
572 smp_rescan_cpus();
573 put_online_cpus();
426} 574}
427 575
428/* 576/*
@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid)
453 return 0; 601 return 0;
454} 602}
455 603
456DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
457
458static void __init smp_create_idle(unsigned int cpu) 604static void __init smp_create_idle(unsigned int cpu)
459{ 605{
460 struct task_struct *p; 606 struct task_struct *p;
@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu)
470 spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock); 616 spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
471} 617}
472 618
473static int cpu_stopped(int cpu) 619static int __cpuinit smp_alloc_lowcore(int cpu)
474{ 620{
475 __u32 status; 621 unsigned long async_stack, panic_stack;
622 struct _lowcore *lowcore;
623 int lc_order;
624
625 lc_order = sizeof(long) == 8 ? 1 : 0;
626 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
627 if (!lowcore)
628 return -ENOMEM;
629 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
630 if (!async_stack)
631 goto out_async_stack;
632 panic_stack = __get_free_page(GFP_KERNEL);
633 if (!panic_stack)
634 goto out_panic_stack;
635
636 *lowcore = S390_lowcore;
637 lowcore->async_stack = async_stack + ASYNC_SIZE;
638 lowcore->panic_stack = panic_stack + PAGE_SIZE;
476 639
477 /* Check for stopped state */ 640#ifndef CONFIG_64BIT
478 if (signal_processor_ps(&status, 0, cpu, sigp_sense) == 641 if (MACHINE_HAS_IEEE) {
479 sigp_status_stored) { 642 unsigned long save_area;
480 if (status & 0x40) 643
481 return 1; 644 save_area = get_zeroed_page(GFP_KERNEL);
645 if (!save_area)
646 goto out_save_area;
647 lowcore->extended_save_area_addr = (u32) save_area;
482 } 648 }
649#endif
650 lowcore_ptr[cpu] = lowcore;
483 return 0; 651 return 0;
652
653#ifndef CONFIG_64BIT
654out_save_area:
655 free_page(panic_stack);
656#endif
657out_panic_stack:
658 free_pages(async_stack, ASYNC_ORDER);
659out_async_stack:
660 free_pages((unsigned long) lowcore, lc_order);
661 return -ENOMEM;
484} 662}
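
lc_order above sizes the lowcore allocation by word size: the z/Architecture prefix area is 8 KiB, so 64-bit kernels need an order-1 (two page) allocation while 31-bit kernels get by with a single page. The arithmetic:

#include <stdio.h>

int main(void)
{
	int lc_order = sizeof(long) == 8 ? 1 : 0;
	unsigned long page_size = 4096;	/* s390 PAGE_SIZE */

	/* 8192 bytes on 64-bit, 4096 bytes on 31-bit */
	printf("lowcore allocation: %lu bytes\n", page_size << lc_order);
	return 0;
}
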
485 663
486/* Upping and downing of CPUs */ 664#ifdef CONFIG_HOTPLUG_CPU
665static void smp_free_lowcore(int cpu)
666{
667 struct _lowcore *lowcore;
668 int lc_order;
669
670 lc_order = sizeof(long) == 8 ? 1 : 0;
671 lowcore = lowcore_ptr[cpu];
672#ifndef CONFIG_64BIT
673 if (MACHINE_HAS_IEEE)
674 free_page((unsigned long) lowcore->extended_save_area_addr);
675#endif
676 free_page(lowcore->panic_stack - PAGE_SIZE);
677 free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
678 free_pages((unsigned long) lowcore, lc_order);
679 lowcore_ptr[cpu] = NULL;
680}
681#endif /* CONFIG_HOTPLUG_CPU */
487 682
488int __cpu_up(unsigned int cpu) 683/* Upping and downing of CPUs */
684int __cpuinit __cpu_up(unsigned int cpu)
489{ 685{
490 struct task_struct *idle; 686 struct task_struct *idle;
491 struct _lowcore *cpu_lowcore; 687 struct _lowcore *cpu_lowcore;
492 struct stack_frame *sf; 688 struct stack_frame *sf;
493 sigp_ccode ccode; 689 sigp_ccode ccode;
494 int curr_cpu;
495 690
496 for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { 691 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
497 __cpu_logical_map[cpu] = (__u16) curr_cpu; 692 return -EIO;
498 if (cpu_stopped(cpu)) 693 if (smp_alloc_lowcore(cpu))
499 break; 694 return -ENOMEM;
500 }
501
502 if (!cpu_stopped(cpu))
503 return -ENODEV;
504 695
505 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), 696 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
506 cpu, sigp_set_prefix); 697 cpu, sigp_set_prefix);
@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu)
515 cpu_lowcore = lowcore_ptr[cpu]; 706 cpu_lowcore = lowcore_ptr[cpu];
516 cpu_lowcore->kernel_stack = (unsigned long) 707 cpu_lowcore->kernel_stack = (unsigned long)
517 task_stack_page(idle) + THREAD_SIZE; 708 task_stack_page(idle) + THREAD_SIZE;
709 cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
518 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack 710 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
519 - sizeof(struct pt_regs) 711 - sizeof(struct pt_regs)
520 - sizeof(struct stack_frame)); 712 - sizeof(struct stack_frame));
@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu)
528 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 720 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
529 cpu_lowcore->current_task = (unsigned long) idle; 721 cpu_lowcore->current_task = (unsigned long) idle;
530 cpu_lowcore->cpu_data.cpu_nr = cpu; 722 cpu_lowcore->cpu_data.cpu_nr = cpu;
723 cpu_lowcore->softirq_pending = 0;
724 cpu_lowcore->ext_call_fast = 0;
531 eieio(); 725 eieio();
532 726
533 while (signal_processor(cpu, sigp_restart) == sigp_busy) 727 while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu)
538 return 0; 732 return 0;
539} 733}
540 734
541static unsigned int __initdata additional_cpus; 735static int __init setup_possible_cpus(char *s)
542static unsigned int __initdata possible_cpus;
543
544void __init smp_setup_cpu_possible_map(void)
545{ 736{
546 unsigned int phy_cpus, pos_cpus, cpu; 737 int pcpus, cpu;
547
548 phy_cpus = smp_count_cpus();
549 pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
550
551 if (possible_cpus)
552 pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
553 738
554 for (cpu = 0; cpu < pos_cpus; cpu++) 739 pcpus = simple_strtoul(s, NULL, 0);
740 cpu_possible_map = cpumask_of_cpu(0);
741 for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
555 cpu_set(cpu, cpu_possible_map); 742 cpu_set(cpu, cpu_possible_map);
556
557 phy_cpus = min(phy_cpus, pos_cpus);
558
559 for (cpu = 0; cpu < phy_cpus; cpu++)
560 cpu_set(cpu, cpu_present_map);
561}
562
563#ifdef CONFIG_HOTPLUG_CPU
564
565static int __init setup_additional_cpus(char *s)
566{
567 additional_cpus = simple_strtoul(s, NULL, 0);
568 return 0;
569}
570early_param("additional_cpus", setup_additional_cpus);
571
572static int __init setup_possible_cpus(char *s)
573{
574 possible_cpus = simple_strtoul(s, NULL, 0);
575 return 0; 743 return 0;
576} 744}
577early_param("possible_cpus", setup_possible_cpus); 745early_param("possible_cpus", setup_possible_cpus);
578 746
747#ifdef CONFIG_HOTPLUG_CPU
748
579int __cpu_disable(void) 749int __cpu_disable(void)
580{ 750{
581 struct ec_creg_mask_parms cr_parms; 751 struct ec_creg_mask_parms cr_parms;
@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu)
612 /* Wait until target cpu is down */ 782 /* Wait until target cpu is down */
613 while (!smp_cpu_not_running(cpu)) 783 while (!smp_cpu_not_running(cpu))
614 cpu_relax(); 784 cpu_relax();
615 printk("Processor %d spun down\n", cpu); 785 smp_free_lowcore(cpu);
786 printk(KERN_INFO "Processor %d spun down\n", cpu);
616} 787}
617 788
618void cpu_die(void) 789void cpu_die(void)
@@ -625,49 +796,19 @@ void cpu_die(void)
625 796
626#endif /* CONFIG_HOTPLUG_CPU */ 797#endif /* CONFIG_HOTPLUG_CPU */
627 798
628/*
629 * Cycle through the processors and setup structures.
630 */
631
632void __init smp_prepare_cpus(unsigned int max_cpus) 799void __init smp_prepare_cpus(unsigned int max_cpus)
633{ 800{
634 unsigned long stack;
635 unsigned int cpu; 801 unsigned int cpu;
636 int i; 802
803 smp_detect_cpus();
637 804
638 /* request the 0x1201 emergency signal external interrupt */ 805 /* request the 0x1201 emergency signal external interrupt */
639 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 806 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
640 panic("Couldn't request external interrupt 0x1201"); 807 panic("Couldn't request external interrupt 0x1201");
641 memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); 808 memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
642 /*
643 * Initialize prefix pages and stacks for all possible cpus
644 */
645 print_cpu_info(&S390_lowcore.cpu_data); 809 print_cpu_info(&S390_lowcore.cpu_data);
810 smp_alloc_lowcore(smp_processor_id());
646 811
647 for_each_possible_cpu(i) {
648 lowcore_ptr[i] = (struct _lowcore *)
649 __get_free_pages(GFP_KERNEL | GFP_DMA,
650 sizeof(void*) == 8 ? 1 : 0);
651 stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
652 if (!lowcore_ptr[i] || !stack)
653 panic("smp_boot_cpus failed to allocate memory\n");
654
655 *(lowcore_ptr[i]) = S390_lowcore;
656 lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
657 stack = __get_free_pages(GFP_KERNEL, 0);
658 if (!stack)
659 panic("smp_boot_cpus failed to allocate memory\n");
660 lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
661#ifndef CONFIG_64BIT
662 if (MACHINE_HAS_IEEE) {
663 lowcore_ptr[i]->extended_save_area_addr =
664 (__u32) __get_free_pages(GFP_KERNEL, 0);
665 if (!lowcore_ptr[i]->extended_save_area_addr)
666 panic("smp_boot_cpus failed to "
667 "allocate memory\n");
668 }
669#endif
670 }
671#ifndef CONFIG_64BIT 812#ifndef CONFIG_64BIT
672 if (MACHINE_HAS_IEEE) 813 if (MACHINE_HAS_IEEE)
673 ctl_set_bit(14, 29); /* enable extended save area */ 814 ctl_set_bit(14, 29); /* enable extended save area */
@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void)
683{ 824{
684 BUG_ON(smp_processor_id() != 0); 825 BUG_ON(smp_processor_id() != 0);
685 826
827 current_thread_info()->cpu = 0;
828 cpu_set(0, cpu_present_map);
686 cpu_set(0, cpu_online_map); 829 cpu_set(0, cpu_online_map);
687 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 830 S390_lowcore.percpu_offset = __per_cpu_offset[0];
688 current_set[0] = current; 831 current_set[0] = current;
832 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
689 spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); 833 spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
690} 834}
691 835
692void __init smp_cpus_done(unsigned int max_cpus) 836void __init smp_cpus_done(unsigned int max_cpus)
693{ 837{
694 cpu_present_map = cpu_possible_map;
695} 838}
696 839
697/* 840/*
@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier)
705 return 0; 848 return 0;
706} 849}
707 850
708static DEFINE_PER_CPU(struct cpu, cpu_devices); 851#ifdef CONFIG_HOTPLUG_CPU
852static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
853{
854 ssize_t count;
855
856 mutex_lock(&smp_cpu_state_mutex);
857 count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
858 mutex_unlock(&smp_cpu_state_mutex);
859 return count;
860}
861
862static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
863 size_t count)
864{
865 int cpu = dev->id;
866 int val, rc;
867 char delim;
868
869 if (sscanf(buf, "%d %c", &val, &delim) != 1)
870 return -EINVAL;
871 if (val != 0 && val != 1)
872 return -EINVAL;
873
874 mutex_lock(&smp_cpu_state_mutex);
875 get_online_cpus();
876 rc = -EBUSY;
877 if (cpu_online(cpu))
878 goto out;
879 rc = 0;
880 switch (val) {
881 case 0:
882 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
883 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
884 if (!rc)
885 smp_cpu_state[cpu] = CPU_STATE_STANDBY;
886 }
887 break;
888 case 1:
889 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
890 rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
891 if (!rc)
892 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
893 }
894 break;
895 default:
896 break;
897 }
898out:
899 put_online_cpus();
900 mutex_unlock(&smp_cpu_state_mutex);
901 return rc ? rc : count;
902}
903static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
904#endif /* CONFIG_HOTPLUG_CPU */
905
906static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
907{
908 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
909}
910static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
911
912
913static struct attribute *cpu_common_attrs[] = {
914#ifdef CONFIG_HOTPLUG_CPU
915 &attr_configure.attr,
916#endif
917 &attr_address.attr,
918 NULL,
919};
920
921static struct attribute_group cpu_common_attr_group = {
922 .attrs = cpu_common_attrs,
923};
709 924
710static ssize_t show_capability(struct sys_device *dev, char *buf) 925static ssize_t show_capability(struct sys_device *dev, char *buf)
711{ 926{
@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
750} 965}
751static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); 966static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
752 967
753static struct attribute *cpu_attrs[] = { 968static struct attribute *cpu_online_attrs[] = {
754 &attr_capability.attr, 969 &attr_capability.attr,
755 &attr_idle_count.attr, 970 &attr_idle_count.attr,
756 &attr_idle_time_us.attr, 971 &attr_idle_time_us.attr,
757 NULL, 972 NULL,
758}; 973};
759 974
760static struct attribute_group cpu_attr_group = { 975static struct attribute_group cpu_online_attr_group = {
761 .attrs = cpu_attrs, 976 .attrs = cpu_online_attrs,
762}; 977};
763 978
764static int __cpuinit smp_cpu_notify(struct notifier_block *self, 979static int __cpuinit smp_cpu_notify(struct notifier_block *self,
@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
778 idle->idle_time = 0; 993 idle->idle_time = 0;
779 idle->idle_count = 0; 994 idle->idle_count = 0;
780 spin_unlock_irq(&idle->lock); 995 spin_unlock_irq(&idle->lock);
781 if (sysfs_create_group(&s->kobj, &cpu_attr_group)) 996 if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
782 return NOTIFY_BAD; 997 return NOTIFY_BAD;
783 break; 998 break;
784 case CPU_DEAD: 999 case CPU_DEAD:
785 case CPU_DEAD_FROZEN: 1000 case CPU_DEAD_FROZEN:
786 sysfs_remove_group(&s->kobj, &cpu_attr_group); 1001 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
787 break; 1002 break;
788 } 1003 }
789 return NOTIFY_OK; 1004 return NOTIFY_OK;
@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
793 .notifier_call = smp_cpu_notify, 1008 .notifier_call = smp_cpu_notify,
794}; 1009};
795 1010
1011static int smp_add_present_cpu(int cpu)
1012{
1013 struct cpu *c = &per_cpu(cpu_devices, cpu);
1014 struct sys_device *s = &c->sysdev;
1015 int rc;
1016
1017 c->hotpluggable = 1;
1018 rc = register_cpu(c, cpu);
1019 if (rc)
1020 goto out;
1021 rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
1022 if (rc)
1023 goto out_cpu;
1024 if (!cpu_online(cpu))
1025 goto out;
1026 rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1027 if (!rc)
1028 return 0;
1029 sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
1030out_cpu:
1031#ifdef CONFIG_HOTPLUG_CPU
1032 unregister_cpu(c);
1033#endif
1034out:
1035 return rc;
1036}
1037
1038#ifdef CONFIG_HOTPLUG_CPU
1039static ssize_t rescan_store(struct sys_device *dev, const char *buf,
1040 size_t count)
1041{
1042 cpumask_t newcpus;
1043 int cpu;
1044 int rc;
1045
1046 mutex_lock(&smp_cpu_state_mutex);
1047 get_online_cpus();
1048 newcpus = cpu_present_map;
1049 rc = smp_rescan_cpus();
1050 if (rc)
1051 goto out;
1052 cpus_andnot(newcpus, cpu_present_map, newcpus);
1053 for_each_cpu_mask(cpu, newcpus) {
1054 rc = smp_add_present_cpu(cpu);
1055 if (rc)
1056 cpu_clear(cpu, cpu_present_map);
1057 }
1058 rc = 0;
1059out:
1060 put_online_cpus();
1061 mutex_unlock(&smp_cpu_state_mutex);
1062 return rc ? rc : count;
1063}
1064static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
1065#endif /* CONFIG_HOTPLUG_CPU */
1066
796static int __init topology_init(void) 1067static int __init topology_init(void)
797{ 1068{
798 int cpu; 1069 int cpu;
@@ -800,16 +1071,14 @@ static int __init topology_init(void)
800 1071
801 register_cpu_notifier(&smp_cpu_nb); 1072 register_cpu_notifier(&smp_cpu_nb);
802 1073
803 for_each_possible_cpu(cpu) { 1074#ifdef CONFIG_HOTPLUG_CPU
804 struct cpu *c = &per_cpu(cpu_devices, cpu); 1075 rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
805 struct sys_device *s = &c->sysdev; 1076 &attr_rescan.attr);
806 1077 if (rc)
807 c->hotpluggable = 1; 1078 return rc;
808 register_cpu(c, cpu); 1079#endif
809 if (!cpu_online(cpu)) 1080 for_each_present_cpu(cpu) {
810 continue; 1081 rc = smp_add_present_cpu(cpu);
811 s = &c->sysdev;
812 rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
813 if (rc) 1082 if (rc)
814 return rc; 1083 return rc;
815 } 1084 }
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 22b800ce2126..3bbac1293be4 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -1145,7 +1145,7 @@ static void etr_work_fn(struct work_struct *work)
1145 * Sysfs interface functions 1145 * Sysfs interface functions
1146 */ 1146 */
1147static struct sysdev_class etr_sysclass = { 1147static struct sysdev_class etr_sysclass = {
1148 set_kset_name("etr") 1148 .name = "etr",
1149}; 1149};
1150 1150
1151static struct sys_device etr_port0_dev = { 1151static struct sys_device etr_port0_dev = {
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8ed16a83fba7..52b8342c6bf2 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -31,6 +31,7 @@
31#include <linux/reboot.h> 31#include <linux/reboot.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33#include <linux/bug.h> 33#include <linux/bug.h>
34#include <linux/utsname.h>
34#include <asm/system.h> 35#include <asm/system.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <asm/io.h> 37#include <asm/io.h>
@@ -168,9 +169,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
168 */ 169 */
169void dump_stack(void) 170void dump_stack(void)
170{ 171{
172 printk("CPU: %d %s %s %.*s\n",
173 task_thread_info(current)->cpu, print_tainted(),
174 init_utsname()->release,
175 (int)strcspn(init_utsname()->version, " "),
176 init_utsname()->version);
177 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
178 current->comm, current->pid, current,
179 (void *) current->thread.ksp);
171 show_stack(NULL, NULL); 180 show_stack(NULL, NULL);
172} 181}
173
174EXPORT_SYMBOL(dump_stack); 182EXPORT_SYMBOL(dump_stack);
175 183
176static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 184static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
@@ -258,8 +266,14 @@ void die(const char * str, struct pt_regs * regs, long err)
258 console_verbose(); 266 console_verbose();
259 spin_lock_irq(&die_lock); 267 spin_lock_irq(&die_lock);
260 bust_spinlocks(1); 268 bust_spinlocks(1);
261 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); 269 printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
262 print_modules(); 270#ifdef CONFIG_PREEMPT
271 printk("PREEMPT ");
272#endif
273#ifdef CONFIG_SMP
274 printk("SMP");
275#endif
276 printk("\n");
263 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); 277 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
264 show_regs(regs); 278 show_regs(regs);
265 bust_spinlocks(0); 279 bust_spinlocks(0);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 849120e3e28a..7d43c3cd3ef3 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -17,6 +17,12 @@ ENTRY(_start)
17jiffies = jiffies_64; 17jiffies = jiffies_64;
18#endif 18#endif
19 19
20PHDRS {
21 text PT_LOAD FLAGS(5); /* R_E */
22 data PT_LOAD FLAGS(7); /* RWE */
23 note PT_NOTE FLAGS(0); /* ___ */
24}
25
20SECTIONS 26SECTIONS
21{ 27{
22 . = 0x00000000; 28 . = 0x00000000;
@@ -33,6 +39,9 @@ SECTIONS
33 39
34 _etext = .; /* End of text section */ 40 _etext = .; /* End of text section */
35 41
42 NOTES :text :note
43 BUG_TABLE :text
44
36 RODATA 45 RODATA
37 46
38#ifdef CONFIG_SHARED_KERNEL 47#ifdef CONFIG_SHARED_KERNEL
@@ -49,9 +58,6 @@ SECTIONS
49 __stop___ex_table = .; 58 __stop___ex_table = .;
50 } 59 }
51 60
52 NOTES
53 BUG_TABLE
54
55 .data : { /* Data */ 61 .data : { /* Data */
56 DATA_DATA 62 DATA_DATA
57 CONSTRUCTORS 63 CONSTRUCTORS
@@ -91,7 +97,7 @@ SECTIONS
91 __init_begin = .; 97 __init_begin = .;
92 .init.text : { 98 .init.text : {
93 _sinittext = .; 99 _sinittext = .;
94 *(.init.text) 100 INIT_TEXT
95 _einittext = .; 101 _einittext = .;
96 } 102 }
97 /* 103 /*
@@ -99,11 +105,11 @@ SECTIONS
99 * to deal with references from __bug_table 105 * to deal with references from __bug_table
100 */ 106 */
101 .exit.text : { 107 .exit.text : {
102 *(.exit.text) 108 EXIT_TEXT
103 } 109 }
104 110
105 .init.data : { 111 .init.data : {
106 *(.init.data) 112 INIT_DATA
107 } 113 }
108 . = ALIGN(0x100); 114 . = ALIGN(0x100);
109 .init.setup : { 115 .init.setup : {
@@ -150,7 +156,7 @@ SECTIONS
150 156
151 /* Sections to be discarded */ 157 /* Sections to be discarded */
152 /DISCARD/ : { 158 /DISCARD/ : {
153 *(.exit.data) 159 EXIT_DATA
154 *(.exitcall.exit) 160 *(.exitcall.exit)
155 } 161 }
156 162
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 8d76403fcf89..e41f4008afc5 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
39 _raw_yield(); 39 _raw_yield();
40} 40}
41 41
42void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc) 42void _raw_spin_lock_wait(raw_spinlock_t *lp)
43{ 43{
44 int count = spin_retry; 44 int count = spin_retry;
45 unsigned int cpu = ~smp_processor_id(); 45 unsigned int cpu = ~smp_processor_id();
@@ -53,15 +53,36 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
53 } 53 }
54 if (__raw_spin_is_locked(lp)) 54 if (__raw_spin_is_locked(lp))
55 continue; 55 continue;
56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) { 56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
57 lp->owner_pc = pc;
58 return; 57 return;
59 }
60 } 58 }
61} 59}
62EXPORT_SYMBOL(_raw_spin_lock_wait); 60EXPORT_SYMBOL(_raw_spin_lock_wait);
63 61
64int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc) 62void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
63{
64 int count = spin_retry;
65 unsigned int cpu = ~smp_processor_id();
66
67 local_irq_restore(flags);
68 while (1) {
69 if (count-- <= 0) {
70 unsigned int owner = lp->owner_cpu;
71 if (owner != 0)
72 _raw_yield_cpu(~owner);
73 count = spin_retry;
74 }
75 if (__raw_spin_is_locked(lp))
76 continue;
77 local_irq_disable();
78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
79 return;
80 local_irq_restore(flags);
81 }
82}
83EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
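
The _flags variant exists so the out-of-line slowpath can spin with the caller's interrupt state restored, re-disabling interrupts only around the actual compare-and-swap. A simplified sketch of the intended caller pattern (not the literal spinlock.h inline, which is outside this patch):

static inline void example_spin_lock_irqsave(raw_spinlock_t *lp,
					     unsigned long *flags)
{
	local_irq_save(*flags);
	if (likely(_raw_compare_and_swap(&lp->owner_cpu, 0,
					 ~smp_processor_id()) == 0))
		return;	/* uncontended: lock taken, interrupts stay off */
	/* contended: spin with interrupts restored; on success the lock is
	 * held and interrupts are disabled again */
	_raw_spin_lock_wait_flags(lp, *flags);
}
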
84
85int _raw_spin_trylock_retry(raw_spinlock_t *lp)
65{ 86{
66 unsigned int cpu = ~smp_processor_id(); 87 unsigned int cpu = ~smp_processor_id();
67 int count; 88 int count;
@@ -69,10 +90,8 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
69 for (count = spin_retry; count > 0; count--) { 90 for (count = spin_retry; count > 0; count--) {
70 if (__raw_spin_is_locked(lp)) 91 if (__raw_spin_is_locked(lp))
71 continue; 92 continue;
72 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) { 93 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
73 lp->owner_pc = pc;
74 return 1; 94 return 1;
75 }
76 } 95 }
77 return 0; 96 return 0;
78} 97}
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 394980b05e6f..880b0ebf894b 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -83,7 +83,7 @@ struct dcss_segment {
83}; 83};
84 84
85static DEFINE_MUTEX(dcss_lock); 85static DEFINE_MUTEX(dcss_lock);
86static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list); 86static LIST_HEAD(dcss_list);
87static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", 87static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
88 "EW/EN-MIXED" }; 88 "EW/EN-MIXED" };
89 89
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fb9c5a85aa56..79d13a166a3d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -15,10 +15,6 @@
15#include <asm/setup.h> 15#include <asm/setup.h>
16#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
17 17
18unsigned long vmalloc_end;
19EXPORT_SYMBOL(vmalloc_end);
20
21static struct page *vmem_map;
22static DEFINE_MUTEX(vmem_mutex); 18static DEFINE_MUTEX(vmem_mutex);
23 19
24struct memory_segment { 20struct memory_segment {
@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
188 pte_t pte; 184 pte_t pte;
189 int ret = -ENOMEM; 185 int ret = -ENOMEM;
190 186
191 map_start = vmem_map + PFN_DOWN(start); 187 map_start = VMEM_MAP + PFN_DOWN(start);
192 map_end = vmem_map + PFN_DOWN(start + size); 188 map_end = VMEM_MAP + PFN_DOWN(start + size);
193 189
194 start_addr = (unsigned long) map_start & PAGE_MASK; 190 start_addr = (unsigned long) map_start & PAGE_MASK;
195 end_addr = PFN_ALIGN((unsigned long) map_end); 191 end_addr = PFN_ALIGN((unsigned long) map_end);
@@ -240,10 +236,10 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
240{ 236{
241 int ret; 237 int ret;
242 238
243 ret = vmem_add_range(start, size); 239 ret = vmem_add_mem_map(start, size);
244 if (ret) 240 if (ret)
245 return ret; 241 return ret;
246 return vmem_add_mem_map(start, size); 242 return vmem_add_range(start, size);
247} 243}
248 244
249/* 245/*
@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
254{ 250{
255 struct memory_segment *tmp; 251 struct memory_segment *tmp;
256 252
257 if (PFN_DOWN(seg->start + seg->size) > max_pfn || 253 if (seg->start + seg->size >= VMALLOC_START ||
258 seg->start + seg->size < seg->start) 254 seg->start + seg->size < seg->start)
259 return -ERANGE; 255 return -ERANGE;
260 256
@@ -357,17 +353,15 @@ out:
357 353
358/* 354/*
359 * map whole physical memory to virtual memory (identity mapping) 355 * map whole physical memory to virtual memory (identity mapping)
 356 * we reserve enough space in the vmalloc area for the vmemmap, so that
 357 * additional memory segments can be hotplugged later.
360 */ 358 */
361void __init vmem_map_init(void) 359void __init vmem_map_init(void)
362{ 360{
363 unsigned long map_size;
364 int i; 361 int i;
365 362
366 map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page); 363 BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
367 vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size); 364 NODE_DATA(0)->node_mem_map = VMEM_MAP;
368 vmem_map = (struct page *) vmalloc_end;
369 NODE_DATA(0)->node_mem_map = vmem_map;
370
371 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) 365 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
372 vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size); 366 vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
373} 367}
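
The BUILD_BUG_ON above guards the static reservation for the struct page array in the vmalloc area. A back-of-the-envelope check on what that reservation has to cover (numbers illustrative; the real VMEM_MAP_SIZE comes from the headers):

#include <stdio.h>

int main(void)
{
	unsigned long mem = 2UL << 30;		/* 2 GiB of identity-mapped RAM */
	unsigned long page_size = 4096;
	unsigned long sizeof_page = 64;		/* illustrative struct page size */
	unsigned long map_bytes = (mem / page_size) * sizeof_page;

	/* 2 GiB of RAM needs roughly 32 MiB of virtual space for its mem_map */
	printf("vmemmap footprint: %lu MiB\n", map_bytes >> 20);
	return 0;
}
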
@@ -382,7 +376,7 @@ static int __init vmem_convert_memory_chunk(void)
382 int i; 376 int i;
383 377
384 mutex_lock(&vmem_mutex); 378 mutex_lock(&vmem_mutex);
385 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 379 for (i = 0; i < MEMORY_CHUNKS; i++) {
386 if (!memory_chunk[i].size) 380 if (!memory_chunk[i].size)
387 continue; 381 continue;
388 seg = kzalloc(sizeof(*seg), GFP_KERNEL); 382 seg = kzalloc(sizeof(*seg), GFP_KERNEL);