author:    Paolo Bonzini <pbonzini@redhat.com>  2014-03-04 09:58:00 -0500
committer: Paolo Bonzini <pbonzini@redhat.com>  2014-03-04 09:58:00 -0500
commit:    1c2af4968ea533e875d7cf8d095c084f18164f5d (patch)
tree:      f8bb9989ff0a770a1324ed3a44acace3c35de63d /arch/s390
parent:    a2fa301fddfcb614568a74317fe9b935dd980045 (diff)
parent:    56041bf920d2937b7cadcb30cb206f0372eee814 (diff)
Merge tag 'kvm-for-3.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into kvm-next
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/appldata/appldata_base.c |  1
-rw-r--r--  arch/s390/crypto/aes_s390.c        | 65
-rw-r--r--  arch/s390/crypto/des_s390.c        | 95
-rw-r--r--  arch/s390/kernel/head64.S          |  7
-rw-r--r--  arch/s390/mm/page-states.c         | 10
5 files changed, 124 insertions, 54 deletions
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 4c4a1cef5208..47c8630c93cd 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -529,6 +529,7 @@ static int __init appldata_init(void)
 {
 	int rc;
 
+	init_virt_timer(&appldata_timer);
 	appldata_timer.function = appldata_timer_function;
 	appldata_timer.data = (unsigned long) &appldata_work;
 
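
The added init_virt_timer() call sets up the timer object's internal state before the callback fields are assigned and the timer is armed; a static object alone does not provide that state. Below is a minimal userspace sketch of the same init-before-use contract (the toy_timer type and its helpers are invented for illustration and are not the s390 vtimer API):

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for a kernel timer object: the embedded bookkeeping
 * (here a magic value standing in for list pointers and state) must
 * be set up by an explicit init call before the object is used. */
struct toy_timer {
	unsigned int magic;          /* set by toy_timer_init() */
	void (*function)(unsigned long);
	unsigned long data;
};

#define TOY_TIMER_MAGIC 0x7469

static void toy_timer_init(struct toy_timer *t)
{
	t->magic = TOY_TIMER_MAGIC;  /* mark internal state as valid */
}

static void toy_timer_add(struct toy_timer *t)
{
	/* adding an uninitialized timer would corrupt the timer list;
	 * the toy version just asserts instead */
	assert(t->magic == TOY_TIMER_MAGIC);
	t->function(t->data);        /* fire immediately for the demo */
}

static void tick(unsigned long data)
{
	printf("timer fired, data=%#lx\n", data);
}

int main(void)
{
	struct toy_timer t;

	toy_timer_init(&t);          /* the step the patch adds */
	t.function = tick;
	t.data = 0x42;
	toy_timer_add(&t);
	return 0;
}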
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b3feabd39f31..cf3c0089bef2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/spinlock.h>
 #include "crypt_s390.h"
 
 #define AES_KEYLEN_128		1
@@ -32,6 +33,7 @@
 #define AES_KEYLEN_256		4
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 static char keylen_flag;
 
 struct s390_aes_ctx {
@@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return aes_set_key(tfm, in_key, key_len);
 }
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* only use complete blocks, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+		       AES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[AES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
 
 	if (!walk->nbytes)
 		return ret;
 
-	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		while (nbytes >= AES_BLOCK_SIZE) {
-			/* only use complete blocks, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-						 nbytes & ~(AES_BLOCK_SIZE - 1);
-			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
-				       AES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = AES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, sctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
 				return -EIO;
+			}
 			if (n > AES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
-			crypto_inc(ctrblk, AES_BLOCK_SIZE);
+			crypto_inc(ctrptr, AES_BLOCK_SIZE);
 			out += n;
 			in += n;
 			nbytes -= n;
 		}
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
+	}
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
@@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-				       AES_BLOCK_SIZE, ctrblk);
+				       AES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != AES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, AES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, AES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
 	}
-	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
 	return ret;
 }
 
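The rewritten ctr_aes_crypt() is a lock-avoidance pattern: the preallocated page-sized ctrblk is attractive because one crypt_s390_kmctr() call can then process many counter blocks, but it is shared global state, so a caller that loses the spin_trylock() race falls back to a one-block buffer on its own stack instead of waiting. Here is a minimal portable sketch of that trylock-with-fallback shape, using a pthread mutex in place of the kernel spinlock (all names are invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16
#define BIG_SIZE   4096            /* stand-in for the shared PAGE_SIZE buffer */

static unsigned char shared_buf[BIG_SIZE];
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

/* Process one request: prefer the large shared buffer (many blocks per
 * backend call), but never block; a loser of the race makes do with a
 * one-block buffer on its own stack. */
static void process(const unsigned char iv[BLOCK_SIZE])
{
	unsigned char local_buf[BLOCK_SIZE];
	unsigned char *buf = local_buf;
	size_t buf_size = sizeof(local_buf);
	int have_lock = (pthread_mutex_trylock(&shared_lock) == 0);

	if (have_lock) {
		buf = shared_buf;
		buf_size = sizeof(shared_buf);
	}
	memcpy(buf, iv, BLOCK_SIZE);   /* seed the counter run */
	printf("using %s buffer (%zu bytes)\n",
	       have_lock ? "shared" : "local", buf_size);
	/* ... bulk work with buf would go here ... */
	if (have_lock)
		pthread_mutex_unlock(&shared_lock);
}

int main(void)
{
	unsigned char iv[BLOCK_SIZE] = { 0 };

	process(iv);                       /* uncontended: shared buffer */
	pthread_mutex_lock(&shared_lock);  /* simulate a concurrent holder */
	process(iv);                       /* contended: local fallback */
	pthread_mutex_unlock(&shared_lock);
	return 0;
}

The trade-off mirrors the patch: the fallback path ends up making one backend call per block, which is slower but contention-free and needs no waiting.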
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 200f2a1b599d..0a5aac8a9412 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
 #define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 
 struct s390_des_ctx {
 	u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 }
 
 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
-			    u8 *iv, struct blkcipher_walk *walk)
+			    struct blkcipher_walk *walk)
 {
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes = walk->nbytes;
+	struct {
+		u8 iv[DES_BLOCK_SIZE];
+		u8 key[DES3_KEY_SIZE];
+	} param;
 
 	if (!nbytes)
 		goto out;
 
-	memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+	memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+	memcpy(param.key, ctx->key, DES3_KEY_SIZE);
 	do {
 		/* only use complete blocks */
 		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, iv, out, in, n);
+		ret = crypt_s390_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
 		nbytes &= DES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	} while ((nbytes = walk->nbytes));
-	memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+	memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
 
 out:
 	return ret;
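
The reworked cbc_desall_crypt() drops the IV kept in the per-tfm context (shared state that concurrent users of one tfm could trample) in favor of an on-stack parameter block that packs the IV and the key back to back, the layout the KMC operation consumes through a single pointer. A small sketch of that packed-parameter idea (struct name invented; the offsets are printed only to show the members are contiguous):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define DES_BLOCK_SIZE 8
#define DES3_KEY_SIZE  24

/* Parameter block in the layout the patch uses: IV first, key right
 * behind it, so one pointer describes both to the backend. */
struct kmc_param {
	unsigned char iv[DES_BLOCK_SIZE];
	unsigned char key[DES3_KEY_SIZE];
};

int main(void)
{
	struct kmc_param param;

	memset(&param, 0, sizeof(param));
	/* char arrays have alignment 1, so there is no padding between
	 * the members: iv at 0, key at 8, 32 bytes total */
	printf("iv at %zu, key at %zu, total %zu bytes\n",
	       offsetof(struct kmc_param, iv),
	       offsetof(struct kmc_param, key),
	       sizeof(param));
	return 0;
}

Because the block lives on the caller's stack, every invocation gets a private IV copy, which is what removes the concurrency hazard.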
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
 			    struct scatterlist *dst, struct scatterlist *src,
 			    unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
 }
 
 static int cbc_des3_decrypt(struct blkcipher_desc *desc,
 			    struct scatterlist *dst, struct scatterlist *src,
 			    unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
 	}
 };
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* align to block size, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+	for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
-			    struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+			    struct s390_des_ctx *ctx,
+			    struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[DES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
+
+	if (!walk->nbytes)
+		return ret;
 
-	memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		while (nbytes >= DES_BLOCK_SIZE) {
-			/* align to block size, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-						   nbytes & ~(DES_BLOCK_SIZE - 1);
-			for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
-				       DES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = DES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, ctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
 				return -EIO;
+			}
 			if (n > DES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
 				       DES_BLOCK_SIZE);
-			crypto_inc(ctrblk, DES_BLOCK_SIZE);
+			crypto_inc(ctrptr, DES_BLOCK_SIZE);
 			out += n;
 			in += n;
 			nbytes -= n;
 		}
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
-
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
+	}
 	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
 	if (nbytes) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
-				       DES_BLOCK_SIZE, ctrblk);
+				       DES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != DES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, DES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, DES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
 	}
-	memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
 	return ret;
 }
 
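Both copies of __ctrblk_init() fill the shared buffer with a run of consecutive counter values: block 0 already holds the current counter, and each following block is a copy of its predecessor incremented once, so the whole run can be handed to the hardware in a single call. A self-contained sketch of that pre-fill (ctr_inc stands in for the kernel's crypto_inc(), and the limit parameter plays PAGE_SIZE's role):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 8               /* DES_BLOCK_SIZE in the patch */

/* Big-endian increment of a counter block, in the spirit of the
 * kernel's crypto_inc(). */
static void ctr_inc(unsigned char *ctr, unsigned int size)
{
	unsigned int i = size;

	while (i-- > 0 && ++ctr[i] == 0)
		;                    /* carry into the next byte */
}

/* Mirror of the patch's __ctrblk_init(): block 0 already holds the
 * current counter; derive blocks 1..n-1 by copy-and-increment. */
static unsigned int ctrblk_init(unsigned char *ctrptr, unsigned int nbytes,
				unsigned int limit)
{
	unsigned int i, n;

	/* only use complete blocks, at most limit bytes */
	n = (nbytes > limit) ? limit : nbytes & ~(BLOCK_SIZE - 1);
	for (i = BLOCK_SIZE; i < n; i += BLOCK_SIZE) {
		memcpy(ctrptr + i, ctrptr + i - BLOCK_SIZE, BLOCK_SIZE);
		ctr_inc(ctrptr + i, BLOCK_SIZE);
	}
	return n;
}

int main(void)
{
	unsigned char buf[4 * BLOCK_SIZE] = { 0 };
	unsigned int n, i, j;

	buf[BLOCK_SIZE - 1] = 0xfe;  /* start near a carry for the demo */
	n = ctrblk_init(buf, sizeof(buf), sizeof(buf));
	for (i = 0; i < n; i += BLOCK_SIZE) {
		for (j = 0; j < BLOCK_SIZE; j++)
			printf("%02x", buf[i + j]);
		printf("\n");
	}
	return 0;
}

The demo prints four blocks ending ...fe, ...ff, ...0100, ...0101, showing the carry propagating across byte boundaries.
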
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae2579c..d7c00507568a 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@ ENTRY(startup_continue)
 	.quad	0			# cr12: tracing off
 	.quad	0			# cr13: home space segment table
 	.quad	0xc0000000		# cr14: machine check handling off
-	.quad	0			# cr15: linkage stack operations
+	.quad	.Llinkage_stack		# cr15: linkage stack operations
 .Lpcmsk:.quad	0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad	0x80000000 + 0x20000 - 8	# 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
 .Lparmaddr:
 	.quad	PARMAREA
 	.align	64
-.Lduct: .long	0,0,0,0,.Lduald,0,0,0
+.Lduct: .long	0,.Laste,.Laste,0,.Lduald,0,0,0
 	.long	0,0,0,0,0,0,0,0
+.Laste:	.quad	0,0xffffffffffffffff,0,0,0,0,0,0
 	.align	128
 .Lduald:.rept	8
 	.long	0x80000000,0,0,0	# invalid access-list entries
 	.endr
+.Llinkage_stack:
+	.long	0,0,0x89000000,0,0,0,0x8a000000,0
 
 ENTRY(_ehead)
 
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..27c50f4d90cb 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
 
 #define ESSA_SET_STABLE		1
 #define ESSA_SET_UNUSED		2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
 
 	if (!cmma_flag)
 		return;
+	/*
+	 * Disable CMM for dump, otherwise the tprot based memory
+	 * detection can fail because of unstable pages.
+	 */
+	if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+		cmma_flag = 0;
+		return;
+	}
 	asm volatile(
 		"	.insn	rrf,0xb9ab0000,%1,%1,0,0\n"
 		"0:	la	%0,0\n"