diff options
author | Mark A. Greer <mgreer@animalcreek.com> | 2012-12-21 12:04:05 -0500 |
---|---|---|
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2013-01-05 05:43:57 -0500 |
commit | dd49a69e8eb1423e4d434081a7785bc1b8b8948a (patch) | |
tree | 9e282e8a1213640c4fa2b76a19625900256f3888 /drivers/crypto/omap-sham.c | |
parent | dfd061d5a8f5a6d89c77d23693a63038ff8cbcd8 (diff) |
crypto: omap-sham - Remove usage of private DMA API
Remove usage of the private OMAP DMA API.
The dmaengine API will be used instead.
CC: Russell King <rmk+kernel@arm.linux.org.uk>
CC: Dmitry Kasatkin <dmitry.kasatkin@intel.com>
Signed-off-by: Mark A. Greer <mgreer@animalcreek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/omap-sham.c')
-rw-r--r-- | drivers/crypto/omap-sham.c | 109 |
1 file changed, 0 insertions, 109 deletions
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index f54ceb8f5b24..f6b270ed7d62 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -13,8 +13,6 @@ | |||
13 | * Some ideas are from old omap-sha1-md5.c driver. | 13 | * Some ideas are from old omap-sha1-md5.c driver. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define OMAP_SHAM_DMA_PRIVATE | ||
17 | |||
18 | #define pr_fmt(fmt) "%s: " fmt, __func__ | 16 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
19 | 17 | ||
20 | #include <linux/err.h> | 18 | #include <linux/err.h> |
@@ -113,9 +111,7 @@ struct omap_sham_reqctx { | |||
113 | 111 | ||
114 | /* walk state */ | 112 | /* walk state */ |
115 | struct scatterlist *sg; | 113 | struct scatterlist *sg; |
116 | #ifndef OMAP_SHAM_DMA_PRIVATE | ||
117 | struct scatterlist sgl; | 114 | struct scatterlist sgl; |
118 | #endif | ||
119 | unsigned int offset; /* offset in current sg */ | 115 | unsigned int offset; /* offset in current sg */ |
120 | unsigned int total; /* total request */ | 116 | unsigned int total; /* total request */ |
121 | 117 | ||
@@ -149,12 +145,7 @@ struct omap_sham_dev { | |||
149 | int irq; | 145 | int irq; |
150 | spinlock_t lock; | 146 | spinlock_t lock; |
151 | int err; | 147 | int err; |
152 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
153 | int dma; | ||
154 | int dma_lch; | ||
155 | #else | ||
156 | struct dma_chan *dma_lch; | 148 | struct dma_chan *dma_lch; |
157 | #endif | ||
158 | struct tasklet_struct done_task; | 149 | struct tasklet_struct done_task; |
159 | 150 | ||
160 | unsigned long flags; | 151 | unsigned long flags; |
@@ -324,7 +315,6 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
324 | return -EINPROGRESS; | 315 | return -EINPROGRESS; |
325 | } | 316 | } |
326 | 317 | ||
327 | #ifndef OMAP_SHAM_DMA_PRIVATE | ||
328 | static void omap_sham_dma_callback(void *param) | 318 | static void omap_sham_dma_callback(void *param) |
329 | { | 319 | { |
330 | struct omap_sham_dev *dd = param; | 320 | struct omap_sham_dev *dd = param; |
@@ -332,34 +322,18 @@ static void omap_sham_dma_callback(void *param) | |||
332 | set_bit(FLAGS_DMA_READY, &dd->flags); | 322 | set_bit(FLAGS_DMA_READY, &dd->flags); |
333 | tasklet_schedule(&dd->done_task); | 323 | tasklet_schedule(&dd->done_task); |
334 | } | 324 | } |
335 | #endif | ||
336 | 325 | ||
337 | static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | 326 | static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, |
338 | size_t length, int final, int is_sg) | 327 | size_t length, int final, int is_sg) |
339 | { | 328 | { |
340 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 329 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
341 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
342 | int len32; | ||
343 | #else | ||
344 | struct dma_async_tx_descriptor *tx; | 330 | struct dma_async_tx_descriptor *tx; |
345 | struct dma_slave_config cfg; | 331 | struct dma_slave_config cfg; |
346 | int len32, ret; | 332 | int len32, ret; |
347 | #endif | ||
348 | 333 | ||
349 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", | 334 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", |
350 | ctx->digcnt, length, final); | 335 | ctx->digcnt, length, final); |
351 | 336 | ||
352 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
353 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
354 | |||
355 | omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, | ||
356 | 1, OMAP_DMA_SYNC_PACKET, dd->dma, | ||
357 | OMAP_DMA_DST_SYNC_PREFETCH); | ||
358 | |||
359 | omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, | ||
360 | dma_addr, 0, 0); | ||
361 | |||
362 | #else | ||
363 | memset(&cfg, 0, sizeof(cfg)); | 337 | memset(&cfg, 0, sizeof(cfg)); |
364 | 338 | ||
365 | cfg.dst_addr = dd->phys_base + SHA_REG_DIN(0); | 339 | cfg.dst_addr = dd->phys_base + SHA_REG_DIN(0); |
@@ -401,7 +375,6 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
401 | 375 | ||
402 | tx->callback = omap_sham_dma_callback; | 376 | tx->callback = omap_sham_dma_callback; |
403 | tx->callback_param = dd; | 377 | tx->callback_param = dd; |
404 | #endif | ||
405 | 378 | ||
406 | omap_sham_write_ctrl(dd, length, final, 1); | 379 | omap_sham_write_ctrl(dd, length, final, 1); |
407 | 380 | ||
@@ -412,12 +385,8 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
412 | 385 | ||
413 | set_bit(FLAGS_DMA_ACTIVE, &dd->flags); | 386 | set_bit(FLAGS_DMA_ACTIVE, &dd->flags); |
414 | 387 | ||
415 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
416 | omap_start_dma(dd->dma_lch); | ||
417 | #else | ||
418 | dmaengine_submit(tx); | 388 | dmaengine_submit(tx); |
419 | dma_async_issue_pending(dd->dma_lch); | 389 | dma_async_issue_pending(dd->dma_lch); |
420 | #endif | ||
421 | 390 | ||
422 | return -EINPROGRESS; | 391 | return -EINPROGRESS; |
423 | } | 392 | } |
@@ -523,7 +492,6 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
523 | if (ctx->bufcnt || ctx->offset) | 492 | if (ctx->bufcnt || ctx->offset) |
524 | return omap_sham_update_dma_slow(dd); | 493 | return omap_sham_update_dma_slow(dd); |
525 | 494 | ||
526 | #ifndef OMAP_SHAM_DMA_PRIVATE | ||
527 | /* | 495 | /* |
528 | * Don't use the sg interface when the transfer size is less | 496 | * Don't use the sg interface when the transfer size is less |
529 | * than the number of elements in a DMA frame. Otherwise, | 497 | * than the number of elements in a DMA frame. Otherwise, |
@@ -532,7 +500,6 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
532 | */ | 500 | */ |
533 | if (ctx->total < (DST_MAXBURST * sizeof(u32))) | 501 | if (ctx->total < (DST_MAXBURST * sizeof(u32))) |
534 | return omap_sham_update_dma_slow(dd); | 502 | return omap_sham_update_dma_slow(dd); |
535 | #endif | ||
536 | 503 | ||
537 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", | 504 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", |
538 | ctx->digcnt, ctx->bufcnt, ctx->total); | 505 | ctx->digcnt, ctx->bufcnt, ctx->total); |
@@ -594,11 +561,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | |||
594 | { | 561 | { |
595 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 562 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
596 | 563 | ||
597 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
598 | omap_stop_dma(dd->dma_lch); | ||
599 | #else | ||
600 | dmaengine_terminate_all(dd->dma_lch); | 564 | dmaengine_terminate_all(dd->dma_lch); |
601 | #endif | ||
602 | 565 | ||
603 | if (ctx->flags & BIT(FLAGS_SG)) { | 566 | if (ctx->flags & BIT(FLAGS_SG)) { |
604 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | 567 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); |
@@ -802,18 +765,6 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
802 | if (err) | 765 | if (err) |
803 | goto err1; | 766 | goto err1; |
804 | 767 | ||
805 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
806 | omap_set_dma_dest_params(dd->dma_lch, 0, | ||
807 | OMAP_DMA_AMODE_CONSTANT, | ||
808 | dd->phys_base + SHA_REG_DIN(0), 0, 16); | ||
809 | |||
810 | omap_set_dma_dest_burst_mode(dd->dma_lch, | ||
811 | OMAP_DMA_DATA_BURST_16); | ||
812 | |||
813 | omap_set_dma_src_burst_mode(dd->dma_lch, | ||
814 | OMAP_DMA_DATA_BURST_4); | ||
815 | #endif | ||
816 | |||
817 | if (ctx->digcnt) | 768 | if (ctx->digcnt) |
818 | /* request has changed - restore hash */ | 769 | /* request has changed - restore hash */ |
819 | omap_sham_copy_hash(req, 0); | 770 | omap_sham_copy_hash(req, 0); |
@@ -1204,55 +1155,13 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id) | |||
1204 | return IRQ_HANDLED; | 1155 | return IRQ_HANDLED; |
1205 | } | 1156 | } |
1206 | 1157 | ||
1207 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
1208 | static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) | ||
1209 | { | ||
1210 | struct omap_sham_dev *dd = data; | ||
1211 | |||
1212 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { | ||
1213 | pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); | ||
1214 | dd->err = -EIO; | ||
1215 | clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */ | ||
1216 | } | ||
1217 | |||
1218 | set_bit(FLAGS_DMA_READY, &dd->flags); | ||
1219 | tasklet_schedule(&dd->done_task); | ||
1220 | } | ||
1221 | |||
1222 | static int omap_sham_dma_init(struct omap_sham_dev *dd) | ||
1223 | { | ||
1224 | int err; | ||
1225 | |||
1226 | dd->dma_lch = -1; | ||
1227 | |||
1228 | err = omap_request_dma(dd->dma, dev_name(dd->dev), | ||
1229 | omap_sham_dma_callback, dd, &dd->dma_lch); | ||
1230 | if (err) { | ||
1231 | dev_err(dd->dev, "Unable to request DMA channel\n"); | ||
1232 | return err; | ||
1233 | } | ||
1234 | |||
1235 | return 0; | ||
1236 | } | ||
1237 | |||
1238 | static void omap_sham_dma_cleanup(struct omap_sham_dev *dd) | ||
1239 | { | ||
1240 | if (dd->dma_lch >= 0) { | ||
1241 | omap_free_dma(dd->dma_lch); | ||
1242 | dd->dma_lch = -1; | ||
1243 | } | ||
1244 | } | ||
1245 | #endif | ||
1246 | |||
1247 | static int __devinit omap_sham_probe(struct platform_device *pdev) | 1158 | static int __devinit omap_sham_probe(struct platform_device *pdev) |
1248 | { | 1159 | { |
1249 | struct omap_sham_dev *dd; | 1160 | struct omap_sham_dev *dd; |
1250 | struct device *dev = &pdev->dev; | 1161 | struct device *dev = &pdev->dev; |
1251 | struct resource *res; | 1162 | struct resource *res; |
1252 | #ifndef OMAP_SHAM_DMA_PRIVATE | ||
1253 | dma_cap_mask_t mask; | 1163 | dma_cap_mask_t mask; |
1254 | unsigned dma_chan; | 1164 | unsigned dma_chan; |
1255 | #endif | ||
1256 | int err, i, j; | 1165 | int err, i, j; |
1257 | 1166 | ||
1258 | dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); | 1167 | dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); |
@@ -1287,11 +1196,7 @@ static int __devinit omap_sham_probe(struct platform_device *pdev) | |||
1287 | err = -ENODEV; | 1196 | err = -ENODEV; |
1288 | goto res_err; | 1197 | goto res_err; |
1289 | } | 1198 | } |
1290 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
1291 | dd->dma = res->start; | ||
1292 | #else | ||
1293 | dma_chan = res->start; | 1199 | dma_chan = res->start; |
1294 | #endif | ||
1295 | 1200 | ||
1296 | /* Get the IRQ */ | 1201 | /* Get the IRQ */ |
1297 | dd->irq = platform_get_irq(pdev, 0); | 1202 | dd->irq = platform_get_irq(pdev, 0); |
@@ -1308,11 +1213,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev) | |||
1308 | goto res_err; | 1213 | goto res_err; |
1309 | } | 1214 | } |
1310 | 1215 | ||
1311 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
1312 | err = omap_sham_dma_init(dd); | ||
1313 | if (err) | ||
1314 | goto dma_err; | ||
1315 | #else | ||
1316 | dma_cap_zero(mask); | 1216 | dma_cap_zero(mask); |
1317 | dma_cap_set(DMA_SLAVE, mask); | 1217 | dma_cap_set(DMA_SLAVE, mask); |
1318 | 1218 | ||
@@ -1323,7 +1223,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev) | |||
1323 | err = -ENXIO; | 1223 | err = -ENXIO; |
1324 | goto dma_err; | 1224 | goto dma_err; |
1325 | } | 1225 | } |
1326 | #endif | ||
1327 | 1226 | ||
1328 | dd->io_base = ioremap(dd->phys_base, SZ_4K); | 1227 | dd->io_base = ioremap(dd->phys_base, SZ_4K); |
1329 | if (!dd->io_base) { | 1228 | if (!dd->io_base) { |
@@ -1359,11 +1258,7 @@ err_algs: | |||
1359 | iounmap(dd->io_base); | 1258 | iounmap(dd->io_base); |
1360 | pm_runtime_disable(dev); | 1259 | pm_runtime_disable(dev); |
1361 | io_err: | 1260 | io_err: |
1362 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
1363 | omap_sham_dma_cleanup(dd); | ||
1364 | #else | ||
1365 | dma_release_channel(dd->dma_lch); | 1261 | dma_release_channel(dd->dma_lch); |
1366 | #endif | ||
1367 | dma_err: | 1262 | dma_err: |
1368 | if (dd->irq >= 0) | 1263 | if (dd->irq >= 0) |
1369 | free_irq(dd->irq, dd); | 1264 | free_irq(dd->irq, dd); |
@@ -1392,11 +1287,7 @@ static int __devexit omap_sham_remove(struct platform_device *pdev) | |||
1392 | tasklet_kill(&dd->done_task); | 1287 | tasklet_kill(&dd->done_task); |
1393 | iounmap(dd->io_base); | 1288 | iounmap(dd->io_base); |
1394 | pm_runtime_disable(&pdev->dev); | 1289 | pm_runtime_disable(&pdev->dev); |
1395 | #ifdef OMAP_SHAM_DMA_PRIVATE | ||
1396 | omap_sham_dma_cleanup(dd); | ||
1397 | #else | ||
1398 | dma_release_channel(dd->dma_lch); | 1290 | dma_release_channel(dd->dma_lch); |
1399 | #endif | ||
1400 | if (dd->irq >= 0) | 1291 | if (dd->irq >= 0) |
1401 | free_irq(dd->irq, dd); | 1292 | free_irq(dd->irq, dd); |
1402 | kfree(dd); | 1293 | kfree(dd); |