path: root/drivers/crypto/nx
author		Marcelo Cerri <mhcerri@linux.vnet.ibm.com>	2013-08-29 10:36:39 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2013-09-02 06:32:55 -0400
commit		dec0ed6c1b2c8c2aa37c04feccaf4784764c95f1 (patch)
tree		8d49a66edd8468c22598ae8aa13af9c6aae799f1 /drivers/crypto/nx
parent		41e3173daf4e2d2f2dcc48ae7ffc8d0c4f3ecec9 (diff)
crypto: nx - fix GCM for zero length messages
The NX GCM implementation doesn't support zero length messages and the
current implementation has two flaws:

 - When the input data length is zero, it ignores the associated data.
 - Even when both lengths are zero, it uses the Crypto API to encrypt a
   zeroed block using ctr(aes), and because of this it allocates a new
   transformation and sets the key for this new tfm. Both operations are
   intended to be used only in user context, while the cryptographic
   operations can be called in both user and softirq contexts.

This patch replaces the nested Crypto API use and adds two special
cases:

 - When input data and associated data lengths are zero: it uses NX ECB
   mode to emulate the encryption of a zeroed block using ctr(aes).
 - When input data is zero and associated data is available: it uses NX
   GMAC mode to calculate the associated data MAC.

Reviewed-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Signed-off-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
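Why the ECB substitution is sound: in CTR mode the ciphertext is the
plaintext XORed with E_K(counter block), and XOR with a zero block is the
identity, so encrypting a single zeroed block under ctr(aes) produces
exactly E_K(J0), the same value a raw ECB encryption of the counter block
yields. A minimal userspace sketch of that equivalence, not part of the
patch, assuming OpenSSL's EVP API and an arbitrary all-zero demo key
(build with -lcrypto):

	#include <stdio.h>
	#include <string.h>
	#include <openssl/evp.h>

	int main(void)
	{
		unsigned char key[16] = { 0 };  /* hypothetical demo key */
		unsigned char ctr[16] = { 0 };  /* counter block (GCM's J0) */
		unsigned char zero[16] = { 0 }; /* one zero plaintext block */
		unsigned char out_ctr[16], out_ecb[16];
		int len;

		EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

		/* ctr(aes): encrypt a single zero block with IV = ctr */
		EVP_EncryptInit_ex(ctx, EVP_aes_128_ctr(), NULL, key, ctr);
		EVP_EncryptUpdate(ctx, out_ctr, &len, zero, sizeof(zero));

		/* ecb(aes): encrypt the counter block directly */
		EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL);
		EVP_EncryptUpdate(ctx, out_ecb, &len, ctr, sizeof(ctr));

		/* both paths must produce the same 16 bytes */
		printf("ctr(zero) == ecb(ctr): %s\n",
		       memcmp(out_ctr, out_ecb, sizeof(out_ctr)) ? "no" : "yes");

		EVP_CIPHER_CTX_free(ctx);
		return 0;
	}

The AAD-only case has no such shortcut: there the tag is GHASH over the
associated data, encrypted with E_K(J0), which is what the new gmac()
path below asks the NX coprocessor to compute.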
Diffstat (limited to 'drivers/crypto/nx')
-rw-r--r--	drivers/crypto/nx/nx-aes-gcm.c	| 132
1 file changed, 112 insertions(+), 20 deletions(-)
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 9e89bdf34487..025d9a8d5b19 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -187,40 +187,125 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
 	return rc;
 }
 
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+{
+	int rc;
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	struct nx_sg *nx_sg;
+	unsigned int nbytes = req->assoclen;
+	unsigned int processed = 0, to_process;
+	u32 max_sg_len;
+
+	/* Set GMAC mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
+
+	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+
+	/* page_limit: number of sg entries that fit on one page */
+	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+			   nx_ctx->ap->sglen);
+
+	/* Copy IV */
+	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+
+	do {
+		/*
+		 * to_process: the data chunk to process in this update.
+		 * This value is bound by sg list limits.
+		 */
+		to_process = min_t(u64, nbytes - processed,
+				   nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+				   NX_PAGE_SIZE * (max_sg_len - 1));
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
+					  req->assoc, processed, to_process);
+		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
+					* sizeof(struct nx_sg);
+
+		csbcpb->cpb.aes_gcm.bit_length_data = 0;
+		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
+			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_gcm.in_s0,
+			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
+
+out:
+	/* Restore GCM mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+	return rc;
+}
+
 static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
 		     int enc)
 {
 	int rc;
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	char out[AES_BLOCK_SIZE];
+	struct nx_sg *in_sg, *out_sg;
 
 	/* For scenarios where the input message is zero length, AES CTR mode
 	 * may be used. Set the source data to be a single block (16B) of all
 	 * zeros, and set the input IV value to be the same as the GMAC IV
 	 * value. - nx_wb 4.8.1.3 */
-	char src[AES_BLOCK_SIZE] = {};
-	struct scatterlist sg;
-
-	desc->tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
-	if (IS_ERR(desc->tfm)) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	crypto_blkcipher_setkey(desc->tfm, csbcpb->cpb.aes_gcm.key,
-		NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
-		NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);
 
-	sg_init_one(&sg, src, AES_BLOCK_SIZE);
+	/* Change to ECB mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
+			sizeof(csbcpb->cpb.aes_ecb.key));
 	if (enc)
-		rc = crypto_blkcipher_encrypt_iv(desc, req->dst, &sg,
-						 AES_BLOCK_SIZE);
+		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 	else
-		rc = crypto_blkcipher_decrypt_iv(desc, req->dst, &sg,
-						 AES_BLOCK_SIZE);
-	crypto_free_blkcipher(desc->tfm);
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+	/* Encrypt the counter/IV */
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+				 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
+				  nx_ctx->ap->sglen);
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	if (rc)
+		goto out;
+	atomic_inc(&(nx_ctx->stats->aes_ops));
+
+	/* Copy out the auth tag */
+	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
+			crypto_aead_authsize(crypto_aead_reqtfm(req)));
 out:
+	/* Restore GCM mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+
+	/*
+	 * The ECB key uses the same region as the GCM AAD and counter, so
+	 * it's safe to just fill it with zeroes.
+	 */
+	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));
+
 	return rc;
 }
 
@@ -242,8 +327,14 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
 	if (nbytes == 0) {
-		rc = gcm_empty(req, &desc, enc);
-		goto out;
+		if (req->assoclen == 0)
+			rc = gcm_empty(req, &desc, enc);
+		else
+			rc = gmac(req, &desc);
+		if (rc)
+			goto out;
+		else
+			goto mac;
 	}
 
 	/* Process associated data */
@@ -310,6 +401,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 		processed += to_process;
 	} while (processed < nbytes);
 
+mac:
 	if (enc) {
 		/* copy out the auth tag */
 		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,