author      Joel Fernandes <joelf@ti.com>              2013-08-17 22:42:27 -0400
committer   Herbert Xu <herbert@gondor.apana.org.au>   2013-08-21 07:28:00 -0400
commit      0c063a22d50606e5e405e954be2de3810372dc27 (patch)
tree        487b7ed25983e6746dd109319df9126ec1c2803a /drivers/crypto/omap-aes.c
parent      0a641712ef9459df5d8c19fc19e686887257b3e3 (diff)
crypto: omap-aes - Remove previously used intermediate buffers
Intermediate buffers were allocated, mapped and used for DMA. They are no
longer required, since previous commits in this series made the driver use
the SGs from the crypto layer directly. Along with them, remove the now
unused SG copying logic and the associated fields in struct omap_aes_dev.
Signed-off-by: Joel Fernandes <joelf@ti.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
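For context, the direct-SG path that makes these buffers unnecessary looks
roughly like the sketch below. This is illustrative only, not the driver's
exact code: the helper name omap_aes_map_crypto_sgs() is made up for the
example, while the dd->in_sg/dd->out_sg fields match struct omap_aes_dev in
the diff below and dma_map_sg()/dma_unmap_sg() are standard DMA API calls.

/*
 * Illustrative sketch: map the crypto layer's scatterlists directly
 * for DMA instead of copying them into intermediate pages.
 */
static int omap_aes_map_crypto_sgs(struct omap_aes_dev *dd)
{
	int err;

	/* Map the source SG list handed in by the crypto layer. */
	err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	if (!err) {
		dev_err(dd->dev, "dma_map_sg() error\n");
		return -EINVAL;
	}

	/* Map the destination SG list that will receive the result. */
	err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	if (!err) {
		dev_err(dd->dev, "dma_map_sg() error\n");
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		return -EINVAL;
	}

	return 0;
}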
Diffstat (limited to 'drivers/crypto/omap-aes.c')
-rw-r--r--   drivers/crypto/omap-aes.c   90
1 file changed, 0 insertions, 90 deletions
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 4ed2a8c40394..81f0e848b8d1 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -150,25 +150,13 @@ struct omap_aes_dev {
 	struct ablkcipher_request	*req;
 	size_t				total;
 	struct scatterlist		*in_sg;
-	struct scatterlist		in_sgl;
-	size_t				in_offset;
 	struct scatterlist		*out_sg;
-	struct scatterlist		out_sgl;
-	size_t				out_offset;
-
-	size_t				buflen;
-	void				*buf_in;
-	size_t				dma_size;
 	int				dma_in;
 	struct dma_chan			*dma_lch_in;
-	dma_addr_t			dma_addr_in;
-	void				*buf_out;
 	int				dma_out;
 	struct dma_chan			*dma_lch_out;
 	int				in_sg_len;
 	int				out_sg_len;
-	dma_addr_t			dma_addr_out;
-
 	const struct omap_aes_pdata	*pdata;
 };
 
@@ -347,33 +335,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
 	dd->dma_lch_out = NULL;
 	dd->dma_lch_in = NULL;
 
-	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
-	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
-	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
-	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
-
-	if (!dd->buf_in || !dd->buf_out) {
-		dev_err(dd->dev, "unable to alloc pages.\n");
-		goto err_alloc;
-	}
-
-	/* MAP here */
-	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
-					 DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-		err = -EINVAL;
-		goto err_map_in;
-	}
-
-	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
-					  DMA_FROM_DEVICE);
-	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-		err = -EINVAL;
-		goto err_map_out;
-	}
-
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
@@ -400,14 +361,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
 err_dma_out:
 	dma_release_channel(dd->dma_lch_in);
 err_dma_in:
-	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-			 DMA_FROM_DEVICE);
-err_map_out:
-	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
-err_map_in:
-	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
-	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
-err_alloc:
 	if (err)
 		pr_err("error: %d\n", err);
 	return err;
@@ -417,11 +370,6 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
 {
 	dma_release_channel(dd->dma_lch_out);
 	dma_release_channel(dd->dma_lch_in);
-	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-			 DMA_FROM_DEVICE);
-	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
-	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
-	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
 }
 
 static void sg_copy_buf(void *buf, struct scatterlist *sg,
@@ -438,42 +386,6 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
 	scatterwalk_done(&walk, out, 0);
 }
 
-static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
-		   size_t buflen, size_t total, int out)
-{
-	unsigned int count, off = 0;
-
-	while (buflen && total) {
-		count = min((*sg)->length - *offset, total);
-		count = min(count, buflen);
-
-		if (!count)
-			return off;
-
-		/*
-		 * buflen and total are AES_BLOCK_SIZE size aligned,
-		 * so count should be also aligned
-		 */
-
-		sg_copy_buf(buf + off, *sg, *offset, count, out);
-
-		off += count;
-		buflen -= count;
-		*offset += count;
-		total -= count;
-
-		if (*offset == (*sg)->length) {
-			*sg = sg_next(*sg);
-			if (*sg)
-				*offset = 0;
-			else
-				total = 0;
-		}
-	}
-
-	return off;
-}
-
 static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
 		struct scatterlist *in_sg, struct scatterlist *out_sg,
 		int in_sg_len, int out_sg_len)
@@ -637,9 +549,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 	/* assign new request to device */
 	dd->req = req;
 	dd->total = req->nbytes;
-	dd->in_offset = 0;
 	dd->in_sg = req->src;
-	dd->out_offset = 0;
 	dd->out_sg = req->dst;
 
 	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
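The last context line above sizes the SG lists with scatterwalk_bytes_sglen()
before they are mapped. Its implementation is not part of this hunk; a minimal
sketch of what such a helper does, assuming it simply counts how many SG
entries are needed to cover 'total' bytes, could look like this:

/*
 * Sketch only (assumed behaviour, not the exact omap-aes.c code):
 * count how many scatterlist entries are needed to cover 'total'
 * bytes, returning a negative value if the list is too short.
 */
static int scatterwalk_bytes_sglen(struct scatterlist *sg, int total)
{
	int n = 0;

	while (sg && total > 0) {
		n++;
		total -= sg->length;
		sg = sg_next(sg);
	}

	if (total > 0)	/* SG list ran out before 'total' bytes */
		return -EINVAL;

	return n;
}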