author    Kent Yoder <key@linux.vnet.ibm.com>  2013-04-12 13:13:59 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2013-05-24 04:11:10 -0400
commit    1ad936e850a896bc16e0d72a56be432f9954ad7e
tree      d0108b0f99c61e6779eadbaebf29ed5b6be2e485 /drivers/crypto
parent    519fe2ecb755b875d9814cdda19778c2e88c6901
drivers/crypto/nx: Fixes for multiple races and issues
Fixes a race on driver init when registering algorithms, where the
driver status flag wasn't being set before self-testing started.
Added the cra_alignmask field for CBC and ECB modes.
Fixed a bug in GCM where the AES block size was being used instead of
the authsize.
Removed use of blkcipher_walk routines for scatterlist processing.
Corner cases in the code prevent us from processing an entire
scatterlist at a time, and walking the buffers in block-sized chunks
turns out to be unnecessary anyway.
Fixed an off-by-one error in saving off extra data in the sha code.
Fixed an accounting error for the number of bytes processed in the sha code.
Signed-off-by: Kent Yoder <key@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'drivers/crypto')
 drivers/crypto/nx/nx-aes-cbc.c |  1
 drivers/crypto/nx/nx-aes-ecb.c |  1
 drivers/crypto/nx/nx-aes-gcm.c |  2
 drivers/crypto/nx/nx-sha256.c  |  8
 drivers/crypto/nx/nx-sha512.c  |  7
 drivers/crypto/nx/nx.c         | 38
 6 files changed, 19 insertions(+), 38 deletions(-)
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index a76d4c4f29f5..35d483f8db66 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = {
 	.cra_blocksize   = AES_BLOCK_SIZE,
 	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 	.cra_type        = &crypto_blkcipher_type,
+	.cra_alignmask   = 0xf,
 	.cra_module      = THIS_MODULE,
 	.cra_init        = nx_crypto_ctx_aes_cbc_init,
 	.cra_exit        = nx_crypto_ctx_exit,
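Note on the cra_alignmask addition: a mask of 0xf asks the crypto layer to
present the cipher with buffers aligned to alignmask + 1 = 16 bytes, copying
through aligned scratch space when a caller's buffer is not. A minimal sketch
of the usual mask semantics; the helper below is hypothetical, not part of
the driver:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: how a 0xf alignmask translates into an
 * alignment test. An address passes when its low four bits are
 * clear, i.e. it is 16-byte aligned. */
static bool nx_buf_is_aligned(const void *buf, unsigned long alignmask)
{
	return ((uintptr_t)buf & alignmask) == 0;
}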
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index ba5f1611336f..7bbc9a81da21 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = {
 	.cra_priority    = 300,
 	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize   = AES_BLOCK_SIZE,
+	.cra_alignmask   = 0xf,
 	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 	.cra_type        = &crypto_blkcipher_type,
 	.cra_module      = THIS_MODULE,
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index c8109edc5cfb..6cca6c392b00 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	if (enc)
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 	else
-		nbytes -= AES_BLOCK_SIZE;
+		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));

 	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
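Note on the authsize fix: on the decrypt path the request length covers the
ciphertext plus the authentication tag, and a GCM tag need not be a full AES
block, so subtracting a hard-coded AES_BLOCK_SIZE is only correct when the
tag happens to be 16 bytes. A worked example with an illustrative 12-byte
tag; the values are not from the driver:

#include <stdio.h>

int main(void)
{
	unsigned int cryptlen = 64 + 12; /* 64 ciphertext bytes + 12-byte tag */
	unsigned int authsize = 12;      /* tag length chosen by the caller */

	/* Old code: always strips one AES block (16 bytes). */
	printf("old nbytes = %u (wrong)\n", cryptlen - 16);
	/* Fixed code: strips exactly the configured tag length. */
	printf("new nbytes = %u (correct)\n", cryptlen - authsize);
	return 0;
}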
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 9767315f8c0b..67024f2f0b78 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	 * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
 	 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+	if (len + sctx->count < SHA256_BLOCK_SIZE) {
 		memcpy(sctx->buf + sctx->count, data, len);
 		sctx->count += len;
 		goto out;
@@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	atomic_inc(&(nx_ctx->stats->sha256_ops));

 	/* copy the leftover back into the state struct */
-	memcpy(sctx->buf, data + len - leftover, leftover);
+	if (leftover)
+		memcpy(sctx->buf, data + len - leftover, leftover);
 	sctx->count = leftover;

 	csbcpb->cpb.sha256.message_bit_length += (u64)
@@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 	struct nx_sg *in_sg, *out_sg;
 	int rc;

+
 	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
@@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)

 	atomic_inc(&(nx_ctx->stats->sha256_ops));

-	atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+	atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
 		     &(nx_ctx->stats->sha256_bytes));
 	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
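Note on the off-by-one: with the old <=, an update where len + sctx->count
landed exactly on SHA256_BLOCK_SIZE was copied into the state buffer instead
of being handed to the coprocessor; the stricter < keeps the cached remainder
strictly smaller than a block. The new if (leftover) guard simply skips a
zero-length memcpy. A sketch of the boundary; the constant mirrors the
driver, the predicate is hypothetical:

#define SHA256_BLOCK_SIZE 64

/* Hypothetical predicate: should an update be buffered rather than
 * processed? The old <= also buffered exactly one full block, e.g.
 * len = 64 with count = 0; the fix buffers only partial blocks. */
static int should_buffer(unsigned int len, unsigned int count)
{
	return len + count < SHA256_BLOCK_SIZE;
}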
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 3177b8c3d5f1..08eee1122349 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	 * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
 	 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+	if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
 		memcpy(sctx->buf + sctx->count[0], data, len);
 		sctx->count[0] += len;
 		goto out;
@@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	atomic_inc(&(nx_ctx->stats->sha512_ops));

 	/* copy the leftover back into the state struct */
-	memcpy(sctx->buf, data + len - leftover, leftover);
+	if (leftover)
+		memcpy(sctx->buf, data + len - leftover, leftover);
 	sctx->count[0] = leftover;

 	spbc_bits = csbcpb->cpb.sha512.spbc * 8;
@@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 	goto out;

 	atomic_inc(&(nx_ctx->stats->sha512_ops));
-	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
 		     &(nx_ctx->stats->sha512_bytes));

 	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
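Note on the accounting fix, identical in the sha256 and sha512 paths: the
sha256_bytes and sha512_bytes statistics are byte counters, but the code was
adding the message bit length, inflating them eightfold; dividing by 8
converts bits to bytes. Illustrative arithmetic only, the value is made up:

#include <stdio.h>

int main(void)
{
	unsigned long long message_bit_length = 1024; /* 128 bytes hashed */

	printf("old bump: %llu (bits, 8x too large)\n", message_bit_length);
	printf("new bump: %llu (bytes)\n", message_bit_length / 8);
	return 0;
}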
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index c767f232e693..bbdab6e5ccf0 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 {
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
 	struct nx_sg *nx_outsg = nx_ctx->out_sg;
-	struct blkcipher_walk walk;
-	int rc;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-	if (rc)
-		goto out;

 	if (iv)
-		memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+		memcpy(iv, desc->info, AES_BLOCK_SIZE);

-	while (walk.nbytes) {
-		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-					   walk.nbytes, nx_ctx->ap->sglen);
-		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-					    walk.nbytes, nx_ctx->ap->sglen);
-
-		rc = blkcipher_walk_done(desc, &walk, 0);
-		if (rc)
-			break;
-	}
-
-	if (walk.nbytes) {
-		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-					   walk.nbytes, nx_ctx->ap->sglen);
-		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-					    walk.nbytes, nx_ctx->ap->sglen);
-
-		rc = 0;
-	}
+	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
+	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);

 	/* these lengths should be negative, which will indicate to phyp that
 	 * the input and output parameters are scatterlists, not linear
 	 * buffers */
 	nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
-out:
-	return rc;
+
+	return 0;
 }

 /**
@@ -454,6 +430,8 @@ static int nx_register_algs(void)
 	if (rc)
 		goto out;

+	nx_driver.of.status = NX_OKAY;
+
 	rc = crypto_register_alg(&nx_ecb_aes_alg);
 	if (rc)
 		goto out;
@@ -498,8 +476,6 @@ static int nx_register_algs(void)
 	if (rc)
 		goto out_unreg_s512;

-	nx_driver.of.status = NX_OKAY;
-
 	goto out;

 out_unreg_s512:
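Note on the nx.c reordering: registering an algorithm kicks off its
self-tests, and those tests call back into the driver's operations, which
refuse work unless nx_driver.of.status reads NX_OKAY. Setting the flag only
after all the registrations meant, as the commit message notes, that the
self-tests raced against a driver still reporting itself unavailable. A
simplified sketch of the ordering with stand-in types and bodies; only the
flag-before-register pattern is taken from the patch:

#include <errno.h>

enum nx_status { NX_DISABLED, NX_OKAY };
static enum nx_status nx_status_flag = NX_DISABLED;

/* Every NX operation checks readiness before touching the hardware. */
static int nx_crypt_op(void)
{
	if (nx_status_flag != NX_OKAY)
		return -ENODEV;
	/* ... submit the request to the coprocessor ... */
	return 0;
}

/* Stand-in for crypto_register_alg(): registration runs the
 * self-test, which exercises the operation being registered. */
static int register_alg_with_selftest(int (*op)(void))
{
	return op();
}

int nx_register_algs_fixed(void)
{
	/* The fix: declare the driver ready before registration so the
	 * self-tests see NX_OKAY instead of bailing out. */
	nx_status_flag = NX_OKAY;
	return register_alg_with_selftest(nx_crypt_op);
}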