author	Fionnuala Gunter <fin@linux.vnet.ibm.com>	2013-08-29 10:36:37 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2013-09-02 06:32:54 -0400
commit	2b188b3b86005ca63eb851a1992f06b9a301f800 (patch)
tree	1ab0e862e7b7570735ebf3d63851709e1cfff5d6 /drivers/crypto/nx
parent	9d6f1a82d3a81d603526980ef705b9ab39f997f3 (diff)
crypto: nx - fix limits to sg lists for AES-CCM
This patch updates the NX driver to perform several hyper calls when
necessary so that the length limits of scatter/gather lists are respected.

Reviewed-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Signed-off-by: Fionnuala Gunter <fin@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
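For context, the splitting introduced by this patch bounds each co-processor
operation by two limits (the per-operation byte limit and the number of sg
entries that fit on one page) and loops until the whole request is consumed.
The standalone program below is an illustration only, not driver code: the
NX_PAGE_SIZE, DATABYTELEN and MAX_SG_LEN values are hypothetical stand-ins for
nx_ctx->ap->databytelen, nx_driver.of.max_sg_len and the real page size, and
the two min() bounds mirror the ones added in the diff.

/* Illustrative sketch of the chunking arithmetic used by this patch. */
#include <stdio.h>
#include <stdint.h>

#define NX_PAGE_SIZE	4096u		/* assumed page size */
#define DATABYTELEN	(64u * 1024u)	/* assumed per-operation byte limit */
#define MAX_SG_LEN	32u		/* assumed sg entries per page */

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t nbytes = 1000000;	/* total request size to chunk */
	uint64_t processed = 0, to_process;
	unsigned int ops = 0;

	do {
		/* bound each hyper call by both limits, as the patch does */
		to_process = min_u64(nbytes - processed, DATABYTELEN);
		to_process = min_u64(to_process,
				     NX_PAGE_SIZE * (MAX_SG_LEN - 1));

		/* in the real driver, every chunk but the last is flagged
		 * NX_FDM_INTERMEDIATE and the next one NX_FDM_CONTINUATION */
		ops++;
		processed += to_process;
	} while (processed < nbytes);

	printf("%llu bytes -> %u hyper calls\n",
	       (unsigned long long)nbytes, ops);
	return 0;
}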
Diffstat (limited to 'drivers/crypto/nx')
-rw-r--r--	drivers/crypto/nx/nx-aes-ccm.c	283
1 file changed, 208 insertions(+), 75 deletions(-)
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 666a35b1181a..5ecd4c2414aa 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -179,13 +179,26 @@ static int generate_pat(u8 *iv,
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
 	struct nx_sg *nx_outsg = nx_ctx->out_sg;
 	unsigned int iauth_len = 0;
-	struct vio_pfo_op *op = NULL;
 	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
 	int rc;
 
 	/* zero the ctr value */
 	memset(iv + 15 - iv[0], 0, iv[0] + 1);
 
+	/* page 78 of nx_wb.pdf has,
+	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
+	 * in length. If a full message is used, the AES CCA implementation
+	 * restricts the maximum AAD length to 2^32 -1 bytes.
+	 * If partial messages are used, the implementation supports
+	 * 2^64 -1 bytes maximum AAD length.
+	 *
+	 * However, in the cryptoapi's aead_request structure,
+	 * assoclen is an unsigned int, thus it cannot hold a length
+	 * value greater than 2^32 - 1.
+	 * Thus the AAD is further constrained by this and is never
+	 * greater than 2^32.
+	 */
+
 	if (!req->assoclen) {
 		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
 	} else if (req->assoclen <= 14) {
@@ -195,7 +208,46 @@ static int generate_pat(u8 *iv,
 		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
 		b1 = nx_ctx->priv.ccm.iauth_tag;
 		iauth_len = req->assoclen;
+	} else if (req->assoclen <= 65280) {
+		/* if associated data is less than (2^16 - 2^8), we construct
+		 * B1 differently and feed in the associated data to a CCA
+		 * operation */
+		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+		iauth_len = 14;
+	} else {
+		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+		iauth_len = 10;
+	}
+
+	/* generate B0 */
+	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+	if (rc)
+		return rc;
+
+	/* generate B1:
+	 * add control info for associated data
+	 * RFC 3610 and NIST Special Publication 800-38C
+	 */
+	if (b1) {
+		memset(b1, 0, 16);
+		if (req->assoclen <= 65280) {
+			*(u16 *)b1 = (u16)req->assoclen;
+			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+					 iauth_len, SCATTERWALK_FROM_SG);
+		} else {
+			*(u16 *)b1 = (u16)(0xfffe);
+			*(u32 *)&b1[2] = (u32)req->assoclen;
+			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
+					 iauth_len, SCATTERWALK_FROM_SG);
+		}
+	}
 
+	/* now copy any remaining AAD to scatterlist and call nx... */
+	if (!req->assoclen) {
+		return rc;
+	} else if (req->assoclen <= 14) {
 		nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
 		nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
 					    nx_ctx->ap->sglen);
@@ -210,56 +262,74 @@ static int generate_pat(u8 *iv,
 		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
 
-		op = &nx_ctx->op;
 		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
-	} else if (req->assoclen <= 65280) {
-		/* if associated data is less than (2^16 - 2^8), we construct
-		 * B1 differently and feed in the associated data to a CCA
-		 * operation */
-		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
-		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
-		iauth_len = 14;
 
-		/* remaining assoc data must have scatterlist built for it */
-		nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen,
-					    req->assoc, iauth_len,
-					    req->assoclen - iauth_len);
-		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
-					sizeof(struct nx_sg);
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			return rc;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
 
-		op = &nx_ctx->op_aead;
-		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
 	} else {
-		/* if associated data is less than (2^32), we construct B1
-		 * differently yet again and feed in the associated data to a
-		 * CCA operation */
-		pr_err("associated data len is %u bytes (returning -EINVAL)\n",
-		       req->assoclen);
-		rc = -EINVAL;
-	}
+		u32 max_sg_len;
+		unsigned int processed = 0, to_process;
+
+		/* page_limit: number of sg entries that fit on one page */
+		max_sg_len = min_t(u32,
+				   nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+				   nx_ctx->ap->sglen);
+
+		processed += iauth_len;
+
+		do {
+			to_process = min_t(u32, req->assoclen - processed,
+					   nx_ctx->ap->databytelen);
+			to_process = min_t(u64, to_process,
+					   NX_PAGE_SIZE * (max_sg_len - 1));
+
+			if ((to_process + processed) < req->assoclen) {
+				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
+					NX_FDM_INTERMEDIATE;
+			} else {
+				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
+					~NX_FDM_INTERMEDIATE;
+			}
+
+			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
+						    nx_ctx->ap->sglen,
+						    req->assoc, processed,
+						    to_process);
+
+			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
+						sizeof(struct nx_sg);
 
-	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
-	if (rc)
-		goto done;
+			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
 
-	if (b1) {
-		memset(b1, 0, 16);
-		*(u16 *)b1 = (u16)req->assoclen;
+			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+			if (rc)
+				return rc;
 
-		scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
-					 iauth_len, SCATTERWALK_FROM_SG);
+			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
+			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
+			       AES_BLOCK_SIZE);
 
-		rc = nx_hcall_sync(nx_ctx, op,
-				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-		if (rc)
-			goto done;
+			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
 
-		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+			atomic_inc(&(nx_ctx->stats->aes_ops));
+			atomic64_add(req->assoclen,
+				     &(nx_ctx->stats->aes_bytes));
+
+			processed += to_process;
+		} while (processed < req->assoclen);
 
-		memcpy(out, result, AES_BLOCK_SIZE);
+		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
 	}
-done:
+
+	memcpy(out, result, AES_BLOCK_SIZE);
+
 	return rc;
 }
 
@@ -272,15 +342,12 @@ static int ccm_nx_decrypt(struct aead_request *req,
 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
 	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
 	unsigned long irq_flags;
+	unsigned int processed = 0, to_process;
+	u32 max_sg_len;
 	int rc = -1;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	if (nbytes > nx_ctx->ap->databytelen) {
-		rc = -EINVAL;
-		goto out;
-	}
-
 	nbytes -= authsize;
 
 	/* copy out the auth tag to compare with later */
@@ -293,22 +360,56 @@ static int ccm_nx_decrypt(struct aead_request *req,
 	if (rc)
 		goto out;
 
-	rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, 0,
-			       csbcpb->cpb.aes_ccm.iv_or_ctr);
-	if (rc)
-		goto out;
+	/* page_limit: number of sg entries that fit on one page */
+	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+			   nx_ctx->ap->sglen);
+
+	do {
 
-	NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
-	NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
+		 * update. This value is bound by sg list limits.
+		 */
+		to_process = min_t(u64, nbytes - processed,
+				   nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+				   NX_PAGE_SIZE * (max_sg_len - 1));
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (rc)
-		goto out;
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+				       to_process, processed,
+				       csbcpb->cpb.aes_ccm.iv_or_ctr);
+		if (rc)
+			goto out;
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
 
-	atomic_inc(&(nx_ctx->stats->aes_ops));
-	atomic64_add(csbcpb->csb.processed_byte_count,
-		     &(nx_ctx->stats->aes_bytes));
+		/* for partial completion, copy following for next
+		 * entry into loop...
+		 */
+		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_ccm.in_s0,
+		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		/* update stats */
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+			     &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
 
 	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
 		    authsize) ? -EBADMSG : 0;
@@ -325,41 +426,73 @@ static int ccm_nx_encrypt(struct aead_request *req,
 	unsigned int nbytes = req->cryptlen;
 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
 	unsigned long irq_flags;
+	unsigned int processed = 0, to_process;
+	u32 max_sg_len;
 	int rc = -1;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	if (nbytes > nx_ctx->ap->databytelen) {
-		rc = -EINVAL;
-		goto out;
-	}
-
 	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
 			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
 	if (rc)
 		goto out;
 
-	rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, 0,
-			       csbcpb->cpb.aes_ccm.iv_or_ctr);
-	if (rc)
-		goto out;
+	/* page_limit: number of sg entries that fit on one page */
+	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+			   nx_ctx->ap->sglen);
+
+	do {
+		/* to process: the AES_BLOCK_SIZE data chunk to process in this
+		 * update. This value is bound by sg list limits.
+		 */
+		to_process = min_t(u64, nbytes - processed,
+				   nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+				   NX_PAGE_SIZE * (max_sg_len - 1));
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+
+		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+				       to_process, processed,
+				       csbcpb->cpb.aes_ccm.iv_or_ctr);
+		if (rc)
+			goto out;
 
-	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
-	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (rc)
-		goto out;
+		/* for partial completion, copy following for next
+		 * entry into loop...
+		 */
+		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_ccm.in_s0,
+		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
 
-	atomic_inc(&(nx_ctx->stats->aes_ops));
-	atomic64_add(csbcpb->csb.processed_byte_count,
-		     &(nx_ctx->stats->aes_bytes));
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		/* update stats */
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+			     &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+
+	} while (processed < nbytes);
 
 	/* copy out the auth tag */
 	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
 				 req->dst, nbytes, authsize,
 				 SCATTERWALK_TO_SG);
+
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
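A side note on the B1 handling added in generate_pat() above: the two branches
follow the associated-data length encoding from RFC 3610 / NIST SP 800-38C.
The standalone program below is an illustration of that encoding only, not
driver code; encode_aad_len() is a hypothetical name, and the 65280 threshold
mirrors the driver's branch condition.

/* Standalone sketch of the RFC 3610 AAD length encoding mirrored by B1:
 * lengths up to 65280 (2^16 - 2^8) use a 2-byte big-endian field, larger
 * 32-bit lengths use the 0xff 0xfe marker followed by 4 length bytes.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t encode_aad_len(uint8_t *b1, uint32_t assoclen)
{
	if (assoclen <= 65280) {
		b1[0] = assoclen >> 8;
		b1[1] = assoclen & 0xff;
		return 2;	/* the driver copies the AAD to b1 + 2 */
	}
	b1[0] = 0xff;
	b1[1] = 0xfe;
	b1[2] = assoclen >> 24;
	b1[3] = (assoclen >> 16) & 0xff;
	b1[4] = (assoclen >> 8) & 0xff;
	b1[5] = assoclen & 0xff;
	return 6;		/* the driver copies the AAD to b1 + 6 */
}

int main(void)
{
	uint8_t b1[16];

	memset(b1, 0, sizeof(b1));
	printf("short AAD header: %zu bytes\n", encode_aad_len(b1, 14));
	printf("long  AAD header: %zu bytes\n", encode_aad_len(b1, 1u << 20));
	return 0;
}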