author     Linus Torvalds <torvalds@linux-foundation.org>  2015-08-17 10:57:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-08-17 10:57:46 -0400
commit     a36304b9e154ccaff67b1d1d0ece6ad6380e3648 (patch)
tree       4d487254bb61712cb64146763d787e2b1040d4e9
parent     2c6625cd545bdd66acff14f3394865d43920a5c7 (diff)
parent     b310c178e6d897f82abb9da3af1cd7c02b09f592 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

  - a regression caused by the conversion of IPsec ESP to the new AEAD
    interface: ESN with authencesn no longer works because it relied on
    the AD input SG list having a specific layout which is no longer
    the case.  In linux-next authencesn is fixed properly and no longer
    assumes anything about the SG list format.  While for this release
    a minimal fix is applied to authencesn so that it works with the
    new linear layout.

  - fix memory corruption caused by bogus index in the caam hash code.

  - fix powerpc nx SHA hashing which could cause module load failures
    if module signature verification is enabled"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: caam - fix memory corruption in ahash_final_ctx
  crypto: nx - respect sg limit bounds when building sg lists for SHA
  crypto: authencesn - Fix breakage with new ESP code
-rw-r--r--  crypto/authencesn.c             44
-rw-r--r--  drivers/crypto/caam/caamhash.c   7
-rw-r--r--  drivers/crypto/nx/nx-sha256.c   27
-rw-r--r--  drivers/crypto/nx/nx-sha512.c   28
4 files changed, 48 insertions(+), 58 deletions(-)
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index a3da6770bc9e..b8efe36ce114 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	unsigned int cryptlen = req->cryptlen;
 	struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}
 
-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;
 
 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = dst;
 
 	areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	struct page *srcp;
 	u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}
 
-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;
 
 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = src;
 
 	areq_ctx->complete = authenc_esn_verify_ahash_done;
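
Note on the authencesn hunks: the new ESP code passes the associated data as
one linear 12-byte SG entry, which is what makes the length check and the
fixed headlen/trailen above safe. As a minimal sketch, assuming the ESN wire
format from RFC 4303 (the struct and field names below are illustrative, not
identifiers from the patch), the bytes map onto the hash SG lists like this:

	/* Illustrative layout only -- names are assumptions based on
	 * the ESN wire format (RFC 4303), not code from the patch. */
	#include <stdint.h>

	struct esn_assoc {
		uint32_t spi;     /* bytes 0..3:  hsg[0], hashed as header  */
		uint32_t seq_hi;  /* bytes 4..7:  tsg, hashed as trailer    */
		uint32_t seq_lo;  /* bytes 8..11: hsg[1], hashed as header  */
	};

So the header hashed before the ciphertext is SPI plus the low sequence word
(headlen = 8), and the high-order sequence word is hashed after it
(trailen = 4), matching how ESN appends the high bits for ICV computation.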
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e8099969..f9c78751989e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
 		      state->buflen_1;
 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
-	int sec4_sg_bytes;
+	int sec4_sg_bytes, sec4_sg_src_index;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret = 0;
 	int sh_len;
 
-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
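
Note on the caamhash hunks: sec4_sg_bytes is a byte count while the S/G table
is indexed in entries, and C pointer arithmetic scales by the element size.
The old `edesc->sec4_sg + sec4_sg_bytes - 1` therefore stepped whole entries,
not bytes, and set SEC4_SG_LEN_FIN far past the table. A standalone sketch of
the scaling mistake, with an invented entry type (not the driver's
sec4_sg_entry):

	/* Standalone illustration of the indexing bug; 'struct entry'
	 * is invented for the example, not the driver's type. */
	#include <stdio.h>

	struct entry {
		unsigned long long ptr;
		unsigned int len;
		unsigned int flags;
	};

	int main(void)
	{
		int nentries = 2;	/* entries in the table */
		int nbytes = nentries * (int)sizeof(struct entry);

		int bad_index = nbytes - 1;	/* old: byte count as index  */
		int good_index = nentries - 1;	/* new: entry count as index */

		printf("old code touched entry %d of a %d-entry table\n",
		       bad_index, nentries);
		printf("fixed code touches entry %d\n", good_index);
		return 0;
	}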
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 08f8d5cd6334..becb738c897b 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
 	struct nx_sg *out_sg;
 	u64 to_process = 0, leftover, total;
 	unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-	in_sg = nx_ctx->in_sg;
 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 	max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	do {
-		/*
-		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-		 * this update. This value is also restricted by the sg list
-		 * limits.
-		 */
-		to_process = total - to_process;
-		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;
 
 		if (buf_len) {
 			data_len = buf_len;
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+			in_sg = nx_build_sg_list(in_sg,
 						 (u8 *) sctx->buf,
 						 &data_len,
 						 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 				rc = -EINVAL;
 				goto out;
 			}
+			used_sgs = in_sg - nx_ctx->in_sg;
 		}
 
+		/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+				   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
 		data_len = to_process - buf_len;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);
 
 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-		to_process = (data_len + buf_len);
+		to_process = data_len + buf_len;
 		leftover = total - to_process;
 
 		/*
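
Note on the nx-sha256 hunks: the chunk size is now derived from what the SG
list can still hold -- one entry is held back because unaligned data can
spill into an extra nx_sg entry, and the entries already consumed by the
buffered leftover are subtracted. A self-contained sketch of the arithmetic;
the sg capacity and page size are assumed example values, not the driver's:

	/* Illustrative arithmetic for the new to_process bound; the
	 * constants below are assumed example values. */
	#include <stdint.h>
	#include <stdio.h>

	#define NX_PAGE_SIZE       4096ULL	/* assumed page size */
	#define SHA256_BLOCK_SIZE    64ULL

	int main(void)
	{
		uint64_t max_sg_len = 1024;	/* assumed sg capacity      */
		int used_sgs = 1;		/* entries spent on leftover */
		uint64_t total = 10ULL << 20;	/* 10 MiB still to hash     */

		/* reserve one entry for misalignment, then clamp */
		uint64_t cap = (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE;
		uint64_t to_process = total < cap ? total : cap;
		to_process &= ~(SHA256_BLOCK_SIZE - 1);	/* round to blocks */

		printf("chunk this iteration: %llu bytes\n",
		       (unsigned long long)to_process);	/* 4186112 */
		return 0;
	}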
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index aff0fe58eac0..b6e183d58d73 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
 	struct nx_sg *out_sg;
 	u64 to_process, leftover = 0, total;
 	unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-	in_sg = nx_ctx->in_sg;
 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 	max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	do {
-		/*
-		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-		 * this update. This value is also restricted by the sg list
-		 * limits.
-		 */
-		to_process = total - leftover;
-		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-		leftover = total - to_process;
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;
 
 		if (buf_len) {
 			data_len = buf_len;
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+			in_sg = nx_build_sg_list(in_sg,
 						 (u8 *) sctx->buf,
 						 &data_len, max_sg_len);
 
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 				rc = -EINVAL;
 				goto out;
 			}
+			used_sgs = in_sg - nx_ctx->in_sg;
 		}
 
+		/* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+				   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
 		data_len = to_process - buf_len;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 		}
 
-		to_process = (data_len + buf_len);
+		to_process = data_len + buf_len;
 		leftover = total - to_process;
 
 		/*
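
The nx-sha512 hunks apply the same bound with a 128-byte block. Because the
block size is a power of two, masking with ~(SHA512_BLOCK_SIZE - 1) rounds
down to a whole number of blocks; a quick illustrative check with an
arbitrary example value:

	/* Illustrative check that the mask rounds down to a block
	 * multiple; the starting value is an arbitrary example. */
	#include <assert.h>

	#define SHA512_BLOCK_SIZE 128ULL

	int main(void)
	{
		unsigned long long cap = 4186112 + 37;	/* arbitrary */
		unsigned long long t = cap & ~(SHA512_BLOCK_SIZE - 1);

		assert(t % SHA512_BLOCK_SIZE == 0);	/* block aligned */
		assert(cap - t < SHA512_BLOCK_SIZE);	/* < one block lost */
		return 0;
	}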