author		Mark A. Greer <mgreer@animalcreek.com>		2012-12-21 12:04:04 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2013-01-05 05:43:56 -0500
commit		dfd061d5a8f5a6d89c77d23693a63038ff8cbcd8 (patch)
tree		b499446322453bbb204679c4fca33debbccb9cad /drivers
parent		3b3f440023b3809c8eabec681768a4bcee15f2b4 (diff)
crypto: omap-sham - Add code to use dmaengine API
Add code to use the new dmaengine API alongside the existing DMA code
that uses the private OMAP DMA API.  The API to use is chosen by
defining or undefining 'OMAP_SHAM_DMA_PRIVATE'.

This is a transitional change and the code that uses the private DMA
API will be removed in an upcoming commit.

CC: Russell King <rmk+kernel@arm.linux.org.uk>
CC: Dmitry Kasatkin <dmitry.kasatkin@intel.com>
Signed-off-by: Mark A. Greer <mgreer@animalcreek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/crypto/omap-sham.c	154
1 file changed, 145 insertions(+), 9 deletions(-)
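
[Editor's note] For context, the dmaengine side of this patch follows the
standard slave-DMA sequence: configure the channel for the peripheral,
prepare a descriptor, attach a completion callback, submit, and issue.
Below is a minimal sketch of that sequence using the same parameters the
driver picks (4-byte writes to the data-in register, burst of 16).  The
wrapper name start_hash_dma() and its argument list are hypothetical
illustrations, not part of the patch.

/*
 * Minimal sketch of the slave-DMA sequence the new code path uses
 * (compare omap_sham_xmit_dma() in the diff below).  'chan' is a
 * channel obtained at probe time; 'fifo' is the physical address of
 * the device data-in register.  Wrapper name/arguments are hypothetical.
 */
#include <linux/dmaengine.h>

static int start_hash_dma(struct dma_chan *chan, dma_addr_t src, size_t len,
			  dma_addr_t fifo, dma_async_tx_callback done,
			  void *arg)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	/* tell the DMA driver how to talk to the peripheral */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* build a mem-to-device descriptor for one contiguous buffer */
	tx = dmaengine_prep_slave_single(chan, src, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EINVAL;

	tx->callback = done;		/* invoked on completion */
	tx->callback_param = arg;

	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the engine */

	return 0;
}

Note that with dmaengine, start/stop and completion notification are
generic (dmaengine_submit()/dma_async_issue_pending() and the callback),
which is why the patch can drop the OMAP-specific transfer, source, and
destination parameter calls on the non-private path.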
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 9c3b096e15e8..f54ceb8f5b24 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -13,6 +13,8 @@
  * Some ideas are from old omap-sha1-md5.c driver.
  */
 
+#define OMAP_SHAM_DMA_PRIVATE
+
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
 #include <linux/err.h>
@@ -27,6 +29,8 @@
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
 #include <linux/pm_runtime.h>
 #include <linux/delay.h>
 #include <linux/crypto.h>
@@ -37,15 +41,15 @@
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 
-#include <linux/omap-dma.h>
-#include <mach/irqs.h>
-
 #define SHA_REG_DIGEST(x)	(0x00 + ((x) * 0x04))
 #define SHA_REG_DIN(x)		(0x1C + ((x) * 0x04))
 
 #define SHA1_MD5_BLOCK_SIZE	SHA1_BLOCK_SIZE
 #define MD5_DIGEST_SIZE		16
 
+#define DST_MAXBURST		16
+#define DMA_MIN			(DST_MAXBURST * sizeof(u32))
+
 #define SHA_REG_DIGCNT		0x14
 
 #define SHA_REG_CTRL		0x18
@@ -109,6 +113,9 @@ struct omap_sham_reqctx {
 
 	/* walk state */
 	struct scatterlist	*sg;
+#ifndef OMAP_SHAM_DMA_PRIVATE
+	struct scatterlist	sgl;
+#endif
 	unsigned int		offset;	/* offset in current sg */
 	unsigned int		total;	/* total request */
 
@@ -142,8 +149,12 @@ struct omap_sham_dev {
 	int			irq;
 	spinlock_t		lock;
 	int			err;
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	int			dma;
 	int			dma_lch;
+#else
+	struct dma_chan		*dma_lch;
+#endif
 	struct tasklet_struct	done_task;
 
 	unsigned long		flags;
@@ -313,15 +324,32 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 	return -EINPROGRESS;
 }
 
+#ifndef OMAP_SHAM_DMA_PRIVATE
+static void omap_sham_dma_callback(void *param)
+{
+	struct omap_sham_dev *dd = param;
+
+	set_bit(FLAGS_DMA_READY, &dd->flags);
+	tasklet_schedule(&dd->done_task);
+}
+#endif
+
 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
-			      size_t length, int final)
+			      size_t length, int final, int is_sg)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	int len32;
+#else
+	struct dma_async_tx_descriptor *tx;
+	struct dma_slave_config cfg;
+	int len32, ret;
+#endif
 
 	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
@@ -331,6 +359,50 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 				dma_addr, 0, 0);
 
+#else
+	memset(&cfg, 0, sizeof(cfg));
+
+	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(0);
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_maxburst = DST_MAXBURST;
+
+	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
+	if (ret) {
+		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
+		return ret;
+	}
+
+	len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;
+
+	if (is_sg) {
+		/*
+		 * The SG entry passed in may not have the 'length' member
+		 * set correctly so use a local SG entry (sgl) with the
+		 * proper value for 'length' instead.  If this is not done,
+		 * the dmaengine may try to DMA the incorrect amount of data.
+		 */
+		sg_init_table(&ctx->sgl, 1);
+		ctx->sgl.page_link = ctx->sg->page_link;
+		ctx->sgl.offset = ctx->sg->offset;
+		sg_dma_len(&ctx->sgl) = len32;
+		sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
+
+		tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	} else {
+		tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+
+	if (!tx) {
+		dev_err(dd->dev, "prep_slave_sg/single() failed\n");
+		return -EINVAL;
+	}
+
+	tx->callback = omap_sham_dma_callback;
+	tx->callback_param = dd;
+#endif
+
 	omap_sham_write_ctrl(dd, length, final, 1);
 
 	ctx->digcnt += length;
@@ -340,7 +412,12 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 
 	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	omap_start_dma(dd->dma_lch);
+#else
+	dmaengine_submit(tx);
+	dma_async_issue_pending(dd->dma_lch);
+#endif
 
 	return -EINPROGRESS;
 }
@@ -387,6 +464,8 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 				  struct omap_sham_reqctx *ctx,
 				  size_t length, int final)
 {
+	int ret;
+
 	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
 				       DMA_TO_DEVICE);
 	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
@@ -396,8 +475,12 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 
 	ctx->flags &= ~BIT(FLAGS_SG);
 
-	/* next call does not fail... so no unmap in the case of error */
-	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+	ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
+	if (ret)
+		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+				 DMA_TO_DEVICE);
+
+	return ret;
 }
 
 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
@@ -432,6 +515,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	unsigned int length, final, tail;
 	struct scatterlist *sg;
+	int ret;
 
 	if (!ctx->total)
 		return 0;
@@ -439,6 +523,17 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 	if (ctx->bufcnt || ctx->offset)
 		return omap_sham_update_dma_slow(dd);
 
+#ifndef OMAP_SHAM_DMA_PRIVATE
+	/*
+	 * Don't use the sg interface when the transfer size is less
+	 * than the number of elements in a DMA frame.  Otherwise,
+	 * the dmaengine infrastructure will calculate that it needs
+	 * to transfer 0 frames which ultimately fails.
+	 */
+	if (ctx->total < (DST_MAXBURST * sizeof(u32)))
+		return omap_sham_update_dma_slow(dd);
+#endif
+
 	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
 			ctx->digcnt, ctx->bufcnt, ctx->total);
 
@@ -476,8 +571,11 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 
 	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 
-	/* next call does not fail... so no unmap in the case of error */
-	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
+	ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
+	if (ret)
+		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+	return ret;
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -496,7 +594,12 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	omap_stop_dma(dd->dma_lch);
+#else
+	dmaengine_terminate_all(dd->dma_lch);
+#endif
+
 	if (ctx->flags & BIT(FLAGS_SG)) {
 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 		if (ctx->sg->length == ctx->offset) {
@@ -583,7 +686,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	int err = 0, use_dma = 1;
 
-	if (ctx->bufcnt <= 64)
+	if (ctx->bufcnt <= DMA_MIN)
 		/* faster to handle last block with cpu */
 		use_dma = 0;
 
@@ -699,6 +802,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
 	if (err)
 		goto err1;
 
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	omap_set_dma_dest_params(dd->dma_lch, 0,
 			OMAP_DMA_AMODE_CONSTANT,
 			dd->phys_base + SHA_REG_DIN(0), 0, 16);
@@ -708,6 +812,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
 
 	omap_set_dma_src_burst_mode(dd->dma_lch,
 			OMAP_DMA_DATA_BURST_4);
+#endif
 
 	if (ctx->digcnt)
 		/* request has changed - restore hash */
@@ -1099,6 +1204,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+#ifdef OMAP_SHAM_DMA_PRIVATE
 static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
 {
 	struct omap_sham_dev *dd = data;
@@ -1136,12 +1242,17 @@ static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
 		dd->dma_lch = -1;
 	}
 }
+#endif
 
 static int __devinit omap_sham_probe(struct platform_device *pdev)
 {
 	struct omap_sham_dev *dd;
 	struct device *dev = &pdev->dev;
 	struct resource *res;
+#ifndef OMAP_SHAM_DMA_PRIVATE
+	dma_cap_mask_t mask;
+	unsigned dma_chan;
+#endif
 	int err, i, j;
 
 	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
@@ -1176,7 +1287,11 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
 		err = -ENODEV;
 		goto res_err;
 	}
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	dd->dma = res->start;
+#else
+	dma_chan = res->start;
+#endif
 
 	/* Get the IRQ */
 	dd->irq = platform_get_irq(pdev, 0);
@@ -1193,9 +1308,22 @@
 		goto res_err;
 	}
 
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	err = omap_sham_dma_init(dd);
 	if (err)
 		goto dma_err;
+#else
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	dd->dma_lch = dma_request_channel(mask, omap_dma_filter_fn, &dma_chan);
+	if (!dd->dma_lch) {
+		dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
+			dma_chan);
+		err = -ENXIO;
+		goto dma_err;
+	}
+#endif
 
 	dd->io_base = ioremap(dd->phys_base, SZ_4K);
 	if (!dd->io_base) {
@@ -1231,7 +1359,11 @@ err_algs:
 	iounmap(dd->io_base);
 	pm_runtime_disable(dev);
io_err:
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	omap_sham_dma_cleanup(dd);
+#else
+	dma_release_channel(dd->dma_lch);
+#endif
dma_err:
 	if (dd->irq >= 0)
 		free_irq(dd->irq, dd);
@@ -1260,7 +1392,11 @@ static int __devexit omap_sham_remove(struct platform_device *pdev)
 	tasklet_kill(&dd->done_task);
 	iounmap(dd->io_base);
 	pm_runtime_disable(&pdev->dev);
+#ifdef OMAP_SHAM_DMA_PRIVATE
 	omap_sham_dma_cleanup(dd);
+#else
+	dma_release_channel(dd->dma_lch);
+#endif
 	if (dd->irq >= 0)
 		free_irq(dd->irq, dd);
 	kfree(dd);
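
[Editor's note] On the probe/remove side, the patch swaps omap_request_dma()
for the generic dmaengine channel allocator, as seen in the probe hunk above.
A minimal sketch of that acquisition step follows, assuming the OMAP DMA
request line still arrives as a platform DMA resource; the helper name
sham_request_chan() is hypothetical.

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

/* request a slave channel wired to one OMAP DMA request line */
static struct dma_chan *sham_request_chan(unsigned int dma_req)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * omap_dma_filter_fn() matches the channel for this request
	 * line; the filter runs synchronously inside the call, so
	 * passing the address of a local variable is safe.
	 */
	return dma_request_channel(mask, omap_dma_filter_fn, &dma_req);
}

The matching teardown is a single dma_release_channel() call, which the
patch adds to both the probe error path (io_err) and omap_sham_remove(),
replacing omap_sham_dma_cleanup() on the non-private path.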