author		Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 13:25:58 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 13:25:58 -0500
commit		27d189c02ba25851973c8582e419c0bded9f7e5b
tree		be142d664bc4e3cec7ab2878a243343f46e897ee /drivers/crypto
parent		a1703154200c390ab03c10224c586e815d3e31e8
parent		55db8387a5e8d07407f0b7c6b2526417a2bc6243
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (46 commits)
  hwrng: via_rng - Fix memory scribbling on some CPUs
  crypto: padlock - Move padlock.h into include/crypto
  hwrng: via_rng - Fix asm constraints
  crypto: n2 - use __devexit not __exit in n2_unregister_algs
  crypto: mark crypto workqueues CPU_INTENSIVE
  crypto: mv_cesa - dont return PTR_ERR() of wrong pointer
  crypto: ripemd - Set module author and update email address
  crypto: omap-sham - backlog handling fix
  crypto: gf128mul - Remove experimental tag
  crypto: af_alg - fix af_alg memory_allocated data type
  crypto: aesni-intel - Fixed build with binutils 2.16
  crypto: af_alg - Make sure sk_security is initialized on accept()ed sockets
  net: Add missing lockdep class names for af_alg
  include: Install linux/if_alg.h for user-space crypto API
  crypto: omap-aes - checkpatch --file warning fixes
  crypto: omap-aes - initialize aes module once per request
  crypto: omap-aes - unnecessary code removed
  crypto: omap-aes - error handling implementation improved
  crypto: omap-aes - redundant locking is removed
  crypto: omap-aes - DMA initialization fixes for OMAP off mode
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/mv_cesa.c	2
-rw-r--r--	drivers/crypto/n2_core.c	2
-rw-r--r--	drivers/crypto/omap-aes.c	260
-rw-r--r--	drivers/crypto/omap-sham.c	374
-rw-r--r--	drivers/crypto/padlock-aes.c	2
-rw-r--r--	drivers/crypto/padlock-sha.c	8
-rw-r--r--	drivers/crypto/padlock.h	23
7 files changed, 355 insertions(+), 316 deletions(-)
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 7d279e578df5..c99305afa58a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -857,7 +857,7 @@ static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
 			printk(KERN_WARNING MV_CESA
 			       "Base driver '%s' could not be loaded!\n",
 			       base_hash_name);
-			err = PTR_ERR(fallback_tfm);
+			err = PTR_ERR(base_hash);
 			goto err_bad_base;
 		}
 	}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 76141262ea1d..80dc094e78c6 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1542,7 +1542,7 @@ out:
 	return err;
 }
 
-static void __exit n2_unregister_algs(void)
+static void __devexit n2_unregister_algs(void)
 {
 	mutex_lock(&spu_lock);
 	if (!--algs_registered)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 799ca517c121..add2a1a72ba4 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -74,11 +74,9 @@
74#define FLAGS_CBC BIT(1) 74#define FLAGS_CBC BIT(1)
75#define FLAGS_GIV BIT(2) 75#define FLAGS_GIV BIT(2)
76 76
77#define FLAGS_NEW_KEY BIT(4) 77#define FLAGS_INIT BIT(4)
78#define FLAGS_NEW_IV BIT(5) 78#define FLAGS_FAST BIT(5)
79#define FLAGS_INIT BIT(6) 79#define FLAGS_BUSY BIT(6)
80#define FLAGS_FAST BIT(7)
81#define FLAGS_BUSY 8
82 80
83struct omap_aes_ctx { 81struct omap_aes_ctx {
84 struct omap_aes_dev *dd; 82 struct omap_aes_dev *dd;
@@ -98,19 +96,18 @@ struct omap_aes_reqctx {
98struct omap_aes_dev { 96struct omap_aes_dev {
99 struct list_head list; 97 struct list_head list;
100 unsigned long phys_base; 98 unsigned long phys_base;
101 void __iomem *io_base; 99 void __iomem *io_base;
102 struct clk *iclk; 100 struct clk *iclk;
103 struct omap_aes_ctx *ctx; 101 struct omap_aes_ctx *ctx;
104 struct device *dev; 102 struct device *dev;
105 unsigned long flags; 103 unsigned long flags;
104 int err;
106 105
107 u32 *iv; 106 spinlock_t lock;
108 u32 ctrl; 107 struct crypto_queue queue;
109 108
110 spinlock_t lock; 109 struct tasklet_struct done_task;
111 struct crypto_queue queue; 110 struct tasklet_struct queue_task;
112
113 struct tasklet_struct task;
114 111
115 struct ablkcipher_request *req; 112 struct ablkcipher_request *req;
116 size_t total; 113 size_t total;
@@ -179,9 +176,13 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
179 176
180static int omap_aes_hw_init(struct omap_aes_dev *dd) 177static int omap_aes_hw_init(struct omap_aes_dev *dd)
181{ 178{
182 int err = 0; 179 /*
183 180 * clocks are enabled when request starts and disabled when finished.
181 * It may be long delays between requests.
182 * Device might go to off mode to save power.
183 */
184 clk_enable(dd->iclk); 184 clk_enable(dd->iclk);
185
185 if (!(dd->flags & FLAGS_INIT)) { 186 if (!(dd->flags & FLAGS_INIT)) {
186 /* is it necessary to reset before every operation? */ 187 /* is it necessary to reset before every operation? */
187 omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET, 188 omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
@@ -193,39 +194,26 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
193 __asm__ __volatile__("nop"); 194 __asm__ __volatile__("nop");
194 __asm__ __volatile__("nop"); 195 __asm__ __volatile__("nop");
195 196
196 err = omap_aes_wait(dd, AES_REG_SYSSTATUS, 197 if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
197 AES_REG_SYSSTATUS_RESETDONE); 198 AES_REG_SYSSTATUS_RESETDONE))
198 if (!err) 199 return -ETIMEDOUT;
199 dd->flags |= FLAGS_INIT;
200 }
201 200
202 return err; 201 dd->flags |= FLAGS_INIT;
203} 202 dd->err = 0;
203 }
204 204
205static void omap_aes_hw_cleanup(struct omap_aes_dev *dd) 205 return 0;
206{
207 clk_disable(dd->iclk);
208} 206}
209 207
210static void omap_aes_write_ctrl(struct omap_aes_dev *dd) 208static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
211{ 209{
212 unsigned int key32; 210 unsigned int key32;
213 int i; 211 int i, err;
214 u32 val, mask; 212 u32 val, mask;
215 213
216 val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); 214 err = omap_aes_hw_init(dd);
217 if (dd->flags & FLAGS_CBC) 215 if (err)
218 val |= AES_REG_CTRL_CBC; 216 return err;
219 if (dd->flags & FLAGS_ENCRYPT)
220 val |= AES_REG_CTRL_DIRECTION;
221
222 if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
223 !(dd->ctx->flags & FLAGS_NEW_KEY))
224 goto out;
225
226 /* only need to write control registers for new settings */
227
228 dd->ctrl = val;
229 217
230 val = 0; 218 val = 0;
231 if (dd->dma_lch_out >= 0) 219 if (dd->dma_lch_out >= 0)
@@ -237,30 +225,43 @@ static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
237 225
238 omap_aes_write_mask(dd, AES_REG_MASK, val, mask); 226 omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
239 227
240 pr_debug("Set key\n");
241 key32 = dd->ctx->keylen / sizeof(u32); 228 key32 = dd->ctx->keylen / sizeof(u32);
242 /* set a key */ 229
230 /* it seems a key should always be set even if it has not changed */
243 for (i = 0; i < key32; i++) { 231 for (i = 0; i < key32; i++) {
244 omap_aes_write(dd, AES_REG_KEY(i), 232 omap_aes_write(dd, AES_REG_KEY(i),
245 __le32_to_cpu(dd->ctx->key[i])); 233 __le32_to_cpu(dd->ctx->key[i]));
246 } 234 }
247 dd->ctx->flags &= ~FLAGS_NEW_KEY;
248 235
249 if (dd->flags & FLAGS_NEW_IV) { 236 if ((dd->flags & FLAGS_CBC) && dd->req->info)
250 pr_debug("Set IV\n"); 237 omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
251 omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4); 238
252 dd->flags &= ~FLAGS_NEW_IV; 239 val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
253 } 240 if (dd->flags & FLAGS_CBC)
241 val |= AES_REG_CTRL_CBC;
242 if (dd->flags & FLAGS_ENCRYPT)
243 val |= AES_REG_CTRL_DIRECTION;
254 244
255 mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | 245 mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
256 AES_REG_CTRL_KEY_SIZE; 246 AES_REG_CTRL_KEY_SIZE;
257 247
258 omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask); 248 omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
259 249
260out: 250 /* IN */
261 /* start DMA or disable idle mode */ 251 omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
262 omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, 252 dd->phys_base + AES_REG_DATA, 0, 4);
263 AES_REG_MASK_START); 253
254 omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
255 omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
256
257 /* OUT */
258 omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
259 dd->phys_base + AES_REG_DATA, 0, 4);
260
261 omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
262 omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
263
264 return 0;
264} 265}
265 266
266static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) 267static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -288,8 +289,16 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
288{ 289{
289 struct omap_aes_dev *dd = data; 290 struct omap_aes_dev *dd = data;
290 291
291 if (lch == dd->dma_lch_out) 292 if (ch_status != OMAP_DMA_BLOCK_IRQ) {
292 tasklet_schedule(&dd->task); 293 pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
294 dd->err = -EIO;
295 dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
296 } else if (lch == dd->dma_lch_in) {
297 return;
298 }
299
300 /* dma_lch_out - completed */
301 tasklet_schedule(&dd->done_task);
293} 302}
294 303
295static int omap_aes_dma_init(struct omap_aes_dev *dd) 304static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -339,18 +348,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
339 goto err_dma_out; 348 goto err_dma_out;
340 } 349 }
341 350
342 omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
343 dd->phys_base + AES_REG_DATA, 0, 4);
344
345 omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
346 omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
347
348 omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
349 dd->phys_base + AES_REG_DATA, 0, 4);
350
351 omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
352 omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
353
354 return 0; 351 return 0;
355 352
356err_dma_out: 353err_dma_out:
@@ -406,6 +403,11 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
406 if (!count) 403 if (!count)
407 return off; 404 return off;
408 405
406 /*
407 * buflen and total are AES_BLOCK_SIZE size aligned,
408 * so count should be also aligned
409 */
410
409 sg_copy_buf(buf + off, *sg, *offset, count, out); 411 sg_copy_buf(buf + off, *sg, *offset, count, out);
410 412
411 off += count; 413 off += count;
@@ -461,7 +463,9 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
461 omap_start_dma(dd->dma_lch_in); 463 omap_start_dma(dd->dma_lch_in);
462 omap_start_dma(dd->dma_lch_out); 464 omap_start_dma(dd->dma_lch_out);
463 465
464 omap_aes_write_ctrl(dd); 466 /* start DMA or disable idle mode */
467 omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
468 AES_REG_MASK_START);
465 469
466 return 0; 470 return 0;
467} 471}
@@ -488,8 +492,10 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
488 count = min(dd->total, sg_dma_len(dd->in_sg)); 492 count = min(dd->total, sg_dma_len(dd->in_sg));
489 count = min(count, sg_dma_len(dd->out_sg)); 493 count = min(count, sg_dma_len(dd->out_sg));
490 494
491 if (count != dd->total) 495 if (count != dd->total) {
496 pr_err("request length != buffer length\n");
492 return -EINVAL; 497 return -EINVAL;
498 }
493 499
494 pr_debug("fast\n"); 500 pr_debug("fast\n");
495 501
@@ -525,23 +531,25 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
525 531
526 dd->total -= count; 532 dd->total -= count;
527 533
528 err = omap_aes_hw_init(dd);
529
530 err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count); 534 err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
535 if (err) {
536 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
537 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
538 }
531 539
532 return err; 540 return err;
533} 541}
534 542
535static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) 543static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
536{ 544{
537 struct omap_aes_ctx *ctx; 545 struct ablkcipher_request *req = dd->req;
538 546
539 pr_debug("err: %d\n", err); 547 pr_debug("err: %d\n", err);
540 548
541 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req)); 549 clk_disable(dd->iclk);
550 dd->flags &= ~FLAGS_BUSY;
542 551
543 if (!dd->total) 552 req->base.complete(&req->base, err);
544 dd->req->base.complete(&dd->req->base, err);
545} 553}
546 554
547static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) 555static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -553,8 +561,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
553 561
554 omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START); 562 omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
555 563
556 omap_aes_hw_cleanup(dd);
557
558 omap_stop_dma(dd->dma_lch_in); 564 omap_stop_dma(dd->dma_lch_in);
559 omap_stop_dma(dd->dma_lch_out); 565 omap_stop_dma(dd->dma_lch_out);
560 566
@@ -574,40 +580,39 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
574 } 580 }
575 } 581 }
576 582
577 if (err || !dd->total)
578 omap_aes_finish_req(dd, err);
579
580 return err; 583 return err;
581} 584}
582 585
583static int omap_aes_handle_req(struct omap_aes_dev *dd) 586static int omap_aes_handle_queue(struct omap_aes_dev *dd,
587 struct ablkcipher_request *req)
584{ 588{
585 struct crypto_async_request *async_req, *backlog; 589 struct crypto_async_request *async_req, *backlog;
586 struct omap_aes_ctx *ctx; 590 struct omap_aes_ctx *ctx;
587 struct omap_aes_reqctx *rctx; 591 struct omap_aes_reqctx *rctx;
588 struct ablkcipher_request *req;
589 unsigned long flags; 592 unsigned long flags;
590 593 int err, ret = 0;
591 if (dd->total)
592 goto start;
593 594
594 spin_lock_irqsave(&dd->lock, flags); 595 spin_lock_irqsave(&dd->lock, flags);
596 if (req)
597 ret = ablkcipher_enqueue_request(&dd->queue, req);
598 if (dd->flags & FLAGS_BUSY) {
599 spin_unlock_irqrestore(&dd->lock, flags);
600 return ret;
601 }
595 backlog = crypto_get_backlog(&dd->queue); 602 backlog = crypto_get_backlog(&dd->queue);
596 async_req = crypto_dequeue_request(&dd->queue); 603 async_req = crypto_dequeue_request(&dd->queue);
597 if (!async_req) 604 if (async_req)
598 clear_bit(FLAGS_BUSY, &dd->flags); 605 dd->flags |= FLAGS_BUSY;
599 spin_unlock_irqrestore(&dd->lock, flags); 606 spin_unlock_irqrestore(&dd->lock, flags);
600 607
601 if (!async_req) 608 if (!async_req)
602 return 0; 609 return ret;
603 610
604 if (backlog) 611 if (backlog)
605 backlog->complete(backlog, -EINPROGRESS); 612 backlog->complete(backlog, -EINPROGRESS);
606 613
607 req = ablkcipher_request_cast(async_req); 614 req = ablkcipher_request_cast(async_req);
608 615
609 pr_debug("get new req\n");
610
611 /* assign new request to device */ 616 /* assign new request to device */
612 dd->req = req; 617 dd->req = req;
613 dd->total = req->nbytes; 618 dd->total = req->nbytes;
@@ -621,27 +626,22 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd)
621 rctx->mode &= FLAGS_MODE_MASK; 626 rctx->mode &= FLAGS_MODE_MASK;
622 dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; 627 dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
623 628
624 dd->iv = req->info; 629 dd->ctx = ctx;
625 if ((dd->flags & FLAGS_CBC) && dd->iv)
626 dd->flags |= FLAGS_NEW_IV;
627 else
628 dd->flags &= ~FLAGS_NEW_IV;
629
630 ctx->dd = dd; 630 ctx->dd = dd;
631 if (dd->ctx != ctx) {
632 /* assign new context to device */
633 dd->ctx = ctx;
634 ctx->flags |= FLAGS_NEW_KEY;
635 }
636 631
637 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) 632 err = omap_aes_write_ctrl(dd);
638 pr_err("request size is not exact amount of AES blocks\n"); 633 if (!err)
634 err = omap_aes_crypt_dma_start(dd);
635 if (err) {
636 /* aes_task will not finish it, so do it here */
637 omap_aes_finish_req(dd, err);
638 tasklet_schedule(&dd->queue_task);
639 }
639 640
640start: 641 return ret; /* return ret, which is enqueue return value */
641 return omap_aes_crypt_dma_start(dd);
642} 642}
643 643
644static void omap_aes_task(unsigned long data) 644static void omap_aes_done_task(unsigned long data)
645{ 645{
646 struct omap_aes_dev *dd = (struct omap_aes_dev *)data; 646 struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
647 int err; 647 int err;
@@ -650,40 +650,50 @@ static void omap_aes_task(unsigned long data)
650 650
651 err = omap_aes_crypt_dma_stop(dd); 651 err = omap_aes_crypt_dma_stop(dd);
652 652
653 err = omap_aes_handle_req(dd); 653 err = dd->err ? : err;
654
655 if (dd->total && !err) {
656 err = omap_aes_crypt_dma_start(dd);
657 if (!err)
658 return; /* DMA started. Not fininishing. */
659 }
660
661 omap_aes_finish_req(dd, err);
662 omap_aes_handle_queue(dd, NULL);
654 663
655 pr_debug("exit\n"); 664 pr_debug("exit\n");
656} 665}
657 666
667static void omap_aes_queue_task(unsigned long data)
668{
669 struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
670
671 omap_aes_handle_queue(dd, NULL);
672}
673
658static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) 674static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
659{ 675{
660 struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( 676 struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
661 crypto_ablkcipher_reqtfm(req)); 677 crypto_ablkcipher_reqtfm(req));
662 struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); 678 struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
663 struct omap_aes_dev *dd; 679 struct omap_aes_dev *dd;
664 unsigned long flags;
665 int err;
666 680
667 pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, 681 pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
668 !!(mode & FLAGS_ENCRYPT), 682 !!(mode & FLAGS_ENCRYPT),
669 !!(mode & FLAGS_CBC)); 683 !!(mode & FLAGS_CBC));
670 684
685 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
686 pr_err("request size is not exact amount of AES blocks\n");
687 return -EINVAL;
688 }
689
671 dd = omap_aes_find_dev(ctx); 690 dd = omap_aes_find_dev(ctx);
672 if (!dd) 691 if (!dd)
673 return -ENODEV; 692 return -ENODEV;
674 693
675 rctx->mode = mode; 694 rctx->mode = mode;
676 695
677 spin_lock_irqsave(&dd->lock, flags); 696 return omap_aes_handle_queue(dd, req);
678 err = ablkcipher_enqueue_request(&dd->queue, req);
679 spin_unlock_irqrestore(&dd->lock, flags);
680
681 if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
682 omap_aes_handle_req(dd);
683
684 pr_debug("exit\n");
685
686 return err;
687} 697}
688 698
689/* ********************** ALG API ************************************ */ 699/* ********************** ALG API ************************************ */
@@ -701,7 +711,6 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
701 711
702 memcpy(ctx->key, key, keylen); 712 memcpy(ctx->key, key, keylen);
703 ctx->keylen = keylen; 713 ctx->keylen = keylen;
704 ctx->flags |= FLAGS_NEW_KEY;
705 714
706 return 0; 715 return 0;
707} 716}
@@ -750,7 +759,7 @@ static struct crypto_alg algs[] = {
750 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 759 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
751 .cra_blocksize = AES_BLOCK_SIZE, 760 .cra_blocksize = AES_BLOCK_SIZE,
752 .cra_ctxsize = sizeof(struct omap_aes_ctx), 761 .cra_ctxsize = sizeof(struct omap_aes_ctx),
753 .cra_alignmask = 0, 762 .cra_alignmask = 0,
754 .cra_type = &crypto_ablkcipher_type, 763 .cra_type = &crypto_ablkcipher_type,
755 .cra_module = THIS_MODULE, 764 .cra_module = THIS_MODULE,
756 .cra_init = omap_aes_cra_init, 765 .cra_init = omap_aes_cra_init,
@@ -770,7 +779,7 @@ static struct crypto_alg algs[] = {
770 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 779 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
771 .cra_blocksize = AES_BLOCK_SIZE, 780 .cra_blocksize = AES_BLOCK_SIZE,
772 .cra_ctxsize = sizeof(struct omap_aes_ctx), 781 .cra_ctxsize = sizeof(struct omap_aes_ctx),
773 .cra_alignmask = 0, 782 .cra_alignmask = 0,
774 .cra_type = &crypto_ablkcipher_type, 783 .cra_type = &crypto_ablkcipher_type,
775 .cra_module = THIS_MODULE, 784 .cra_module = THIS_MODULE,
776 .cra_init = omap_aes_cra_init, 785 .cra_init = omap_aes_cra_init,
@@ -849,7 +858,8 @@ static int omap_aes_probe(struct platform_device *pdev)
849 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR); 858 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
850 clk_disable(dd->iclk); 859 clk_disable(dd->iclk);
851 860
852 tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd); 861 tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
862 tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
853 863
854 err = omap_aes_dma_init(dd); 864 err = omap_aes_dma_init(dd);
855 if (err) 865 if (err)
@@ -876,7 +886,8 @@ err_algs:
876 crypto_unregister_alg(&algs[j]); 886 crypto_unregister_alg(&algs[j]);
877 omap_aes_dma_cleanup(dd); 887 omap_aes_dma_cleanup(dd);
878err_dma: 888err_dma:
879 tasklet_kill(&dd->task); 889 tasklet_kill(&dd->done_task);
890 tasklet_kill(&dd->queue_task);
880 iounmap(dd->io_base); 891 iounmap(dd->io_base);
881err_io: 892err_io:
882 clk_put(dd->iclk); 893 clk_put(dd->iclk);
@@ -903,7 +914,8 @@ static int omap_aes_remove(struct platform_device *pdev)
903 for (i = 0; i < ARRAY_SIZE(algs); i++) 914 for (i = 0; i < ARRAY_SIZE(algs); i++)
904 crypto_unregister_alg(&algs[i]); 915 crypto_unregister_alg(&algs[i]);
905 916
906 tasklet_kill(&dd->task); 917 tasklet_kill(&dd->done_task);
918 tasklet_kill(&dd->queue_task);
907 omap_aes_dma_cleanup(dd); 919 omap_aes_dma_cleanup(dd);
908 iounmap(dd->io_base); 920 iounmap(dd->io_base);
909 clk_put(dd->iclk); 921 clk_put(dd->iclk);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a081c7c7d03f..2e71123516e0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,10 +72,9 @@
72 72
73#define DEFAULT_TIMEOUT_INTERVAL HZ 73#define DEFAULT_TIMEOUT_INTERVAL HZ
74 74
75#define FLAGS_FIRST 0x0001
76#define FLAGS_FINUP 0x0002 75#define FLAGS_FINUP 0x0002
77#define FLAGS_FINAL 0x0004 76#define FLAGS_FINAL 0x0004
78#define FLAGS_FAST 0x0008 77#define FLAGS_SG 0x0008
79#define FLAGS_SHA1 0x0010 78#define FLAGS_SHA1 0x0010
80#define FLAGS_DMA_ACTIVE 0x0020 79#define FLAGS_DMA_ACTIVE 0x0020
81#define FLAGS_OUTPUT_READY 0x0040 80#define FLAGS_OUTPUT_READY 0x0040
@@ -83,13 +82,17 @@
83#define FLAGS_INIT 0x0100 82#define FLAGS_INIT 0x0100
84#define FLAGS_CPU 0x0200 83#define FLAGS_CPU 0x0200
85#define FLAGS_HMAC 0x0400 84#define FLAGS_HMAC 0x0400
86 85#define FLAGS_ERROR 0x0800
87/* 3rd byte */ 86#define FLAGS_BUSY 0x1000
88#define FLAGS_BUSY 16
89 87
90#define OP_UPDATE 1 88#define OP_UPDATE 1
91#define OP_FINAL 2 89#define OP_FINAL 2
92 90
91#define OMAP_ALIGN_MASK (sizeof(u32)-1)
92#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
93
94#define BUFLEN PAGE_SIZE
95
93struct omap_sham_dev; 96struct omap_sham_dev;
94 97
95struct omap_sham_reqctx { 98struct omap_sham_reqctx {
@@ -97,8 +100,8 @@ struct omap_sham_reqctx {
97 unsigned long flags; 100 unsigned long flags;
98 unsigned long op; 101 unsigned long op;
99 102
103 u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
100 size_t digcnt; 104 size_t digcnt;
101 u8 *buffer;
102 size_t bufcnt; 105 size_t bufcnt;
103 size_t buflen; 106 size_t buflen;
104 dma_addr_t dma_addr; 107 dma_addr_t dma_addr;
@@ -107,6 +110,8 @@ struct omap_sham_reqctx {
107 struct scatterlist *sg; 110 struct scatterlist *sg;
108 unsigned int offset; /* offset in current sg */ 111 unsigned int offset; /* offset in current sg */
109 unsigned int total; /* total request */ 112 unsigned int total; /* total request */
113
114 u8 buffer[0] OMAP_ALIGNED;
110}; 115};
111 116
112struct omap_sham_hmac_ctx { 117struct omap_sham_hmac_ctx {
@@ -136,6 +141,7 @@ struct omap_sham_dev {
136 int irq; 141 int irq;
137 struct clk *iclk; 142 struct clk *iclk;
138 spinlock_t lock; 143 spinlock_t lock;
144 int err;
139 int dma; 145 int dma;
140 int dma_lch; 146 int dma_lch;
141 struct tasklet_struct done_task; 147 struct tasklet_struct done_task;
@@ -194,53 +200,68 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
194static void omap_sham_copy_hash(struct ahash_request *req, int out) 200static void omap_sham_copy_hash(struct ahash_request *req, int out)
195{ 201{
196 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 202 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
203 u32 *hash = (u32 *)ctx->digest;
204 int i;
205
206 /* MD5 is almost unused. So copy sha1 size to reduce code */
207 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
208 if (out)
209 hash[i] = omap_sham_read(ctx->dd,
210 SHA_REG_DIGEST(i));
211 else
212 omap_sham_write(ctx->dd,
213 SHA_REG_DIGEST(i), hash[i]);
214 }
215}
216
217static void omap_sham_copy_ready_hash(struct ahash_request *req)
218{
219 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
220 u32 *in = (u32 *)ctx->digest;
197 u32 *hash = (u32 *)req->result; 221 u32 *hash = (u32 *)req->result;
198 int i; 222 int i;
199 223
224 if (!hash)
225 return;
226
200 if (likely(ctx->flags & FLAGS_SHA1)) { 227 if (likely(ctx->flags & FLAGS_SHA1)) {
201 /* SHA1 results are in big endian */ 228 /* SHA1 results are in big endian */
202 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) 229 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
203 if (out) 230 hash[i] = be32_to_cpu(in[i]);
204 hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
205 SHA_REG_DIGEST(i)));
206 else
207 omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
208 cpu_to_be32(hash[i]));
209 } else { 231 } else {
210 /* MD5 results are in little endian */ 232 /* MD5 results are in little endian */
211 for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) 233 for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
212 if (out) 234 hash[i] = le32_to_cpu(in[i]);
213 hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
214 SHA_REG_DIGEST(i)));
215 else
216 omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
217 cpu_to_le32(hash[i]));
218 } 235 }
219} 236}
220 237
221static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, 238static int omap_sham_hw_init(struct omap_sham_dev *dd)
222 int final, int dma)
223{ 239{
224 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 240 clk_enable(dd->iclk);
225 u32 val = length << 5, mask;
226 241
227 if (unlikely(!ctx->digcnt)) { 242 if (!(dd->flags & FLAGS_INIT)) {
243 omap_sham_write_mask(dd, SHA_REG_MASK,
244 SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
228 245
229 clk_enable(dd->iclk); 246 if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
247 SHA_REG_SYSSTATUS_RESETDONE))
248 return -ETIMEDOUT;
230 249
231 if (!(dd->flags & FLAGS_INIT)) { 250 dd->flags |= FLAGS_INIT;
232 omap_sham_write_mask(dd, SHA_REG_MASK, 251 dd->err = 0;
233 SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); 252 }
234 253
235 if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, 254 return 0;
236 SHA_REG_SYSSTATUS_RESETDONE)) 255}
237 return -ETIMEDOUT;
238 256
239 dd->flags |= FLAGS_INIT; 257static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
240 } 258 int final, int dma)
241 } else { 259{
260 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
261 u32 val = length << 5, mask;
262
263 if (likely(ctx->digcnt))
242 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); 264 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
243 }
244 265
245 omap_sham_write_mask(dd, SHA_REG_MASK, 266 omap_sham_write_mask(dd, SHA_REG_MASK,
246 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), 267 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
@@ -260,29 +281,26 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
260 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; 281 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
261 282
262 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); 283 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
263
264 return 0;
265} 284}
266 285
267static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, 286static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
268 size_t length, int final) 287 size_t length, int final)
269{ 288{
270 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 289 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
271 int err, count, len32; 290 int count, len32;
272 const u32 *buffer = (const u32 *)buf; 291 const u32 *buffer = (const u32 *)buf;
273 292
274 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", 293 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
275 ctx->digcnt, length, final); 294 ctx->digcnt, length, final);
276 295
277 err = omap_sham_write_ctrl(dd, length, final, 0); 296 omap_sham_write_ctrl(dd, length, final, 0);
278 if (err) 297
279 return err; 298 /* should be non-zero before next lines to disable clocks later */
299 ctx->digcnt += length;
280 300
281 if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) 301 if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
282 return -ETIMEDOUT; 302 return -ETIMEDOUT;
283 303
284 ctx->digcnt += length;
285
286 if (final) 304 if (final)
287 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ 305 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
288 306
@@ -298,16 +316,11 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
298 size_t length, int final) 316 size_t length, int final)
299{ 317{
300 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 318 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
301 int err, len32; 319 int len32;
302 320
303 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", 321 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
304 ctx->digcnt, length, final); 322 ctx->digcnt, length, final);
305 323
306 /* flush cache entries related to our page */
307 if (dma_addr == ctx->dma_addr)
308 dma_sync_single_for_device(dd->dev, dma_addr, length,
309 DMA_TO_DEVICE);
310
311 len32 = DIV_ROUND_UP(length, sizeof(u32)); 324 len32 = DIV_ROUND_UP(length, sizeof(u32));
312 325
313 omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, 326 omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
@@ -317,9 +330,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
317 omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, 330 omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
318 dma_addr, 0, 0); 331 dma_addr, 0, 0);
319 332
320 err = omap_sham_write_ctrl(dd, length, final, 1); 333 omap_sham_write_ctrl(dd, length, final, 1);
321 if (err)
322 return err;
323 334
324 ctx->digcnt += length; 335 ctx->digcnt += length;
325 336
@@ -371,15 +382,29 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
371 return 0; 382 return 0;
372} 383}
373 384
385static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
386 struct omap_sham_reqctx *ctx,
387 size_t length, int final)
388{
389 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
390 DMA_TO_DEVICE);
391 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
392 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
393 return -EINVAL;
394 }
395
396 ctx->flags &= ~FLAGS_SG;
397
398 /* next call does not fail... so no unmap in the case of error */
399 return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
400}
401
374static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) 402static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
375{ 403{
376 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 404 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
377 unsigned int final; 405 unsigned int final;
378 size_t count; 406 size_t count;
379 407
380 if (!ctx->total)
381 return 0;
382
383 omap_sham_append_sg(ctx); 408 omap_sham_append_sg(ctx);
384 409
385 final = (ctx->flags & FLAGS_FINUP) && !ctx->total; 410 final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -390,30 +415,68 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
390 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { 415 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
391 count = ctx->bufcnt; 416 count = ctx->bufcnt;
392 ctx->bufcnt = 0; 417 ctx->bufcnt = 0;
393 return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final); 418 return omap_sham_xmit_dma_map(dd, ctx, count, final);
394 } 419 }
395 420
396 return 0; 421 return 0;
397} 422}
398 423
399static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) 424/* Start address alignment */
425#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32)))
426/* SHA1 block size alignment */
427#define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
428
429static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
400{ 430{
401 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 431 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
402 unsigned int length; 432 unsigned int length, final, tail;
433 struct scatterlist *sg;
403 434
404 ctx->flags |= FLAGS_FAST; 435 if (!ctx->total)
436 return 0;
437
438 if (ctx->bufcnt || ctx->offset)
439 return omap_sham_update_dma_slow(dd);
440
441 dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
442 ctx->digcnt, ctx->bufcnt, ctx->total);
443
444 sg = ctx->sg;
405 445
406 length = min(ctx->total, sg_dma_len(ctx->sg)); 446 if (!SG_AA(sg))
407 ctx->total = length; 447 return omap_sham_update_dma_slow(dd);
448
449 if (!sg_is_last(sg) && !SG_SA(sg))
450 /* size is not SHA1_BLOCK_SIZE aligned */
451 return omap_sham_update_dma_slow(dd);
452
453 length = min(ctx->total, sg->length);
454
455 if (sg_is_last(sg)) {
456 if (!(ctx->flags & FLAGS_FINUP)) {
457 /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
458 tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
459 /* without finup() we need one block to close hash */
460 if (!tail)
461 tail = SHA1_MD5_BLOCK_SIZE;
462 length -= tail;
463 }
464 }
408 465
409 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { 466 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
410 dev_err(dd->dev, "dma_map_sg error\n"); 467 dev_err(dd->dev, "dma_map_sg error\n");
411 return -EINVAL; 468 return -EINVAL;
412 } 469 }
413 470
471 ctx->flags |= FLAGS_SG;
472
414 ctx->total -= length; 473 ctx->total -= length;
474 ctx->offset = length; /* offset where to start slow */
415 475
416 return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); 476 final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
477
478 /* next call does not fail... so no unmap in the case of error */
479 return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
417} 480}
418 481
419static int omap_sham_update_cpu(struct omap_sham_dev *dd) 482static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -433,8 +496,17 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
433 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 496 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
434 497
435 omap_stop_dma(dd->dma_lch); 498 omap_stop_dma(dd->dma_lch);
436 if (ctx->flags & FLAGS_FAST) 499 if (ctx->flags & FLAGS_SG) {
437 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); 500 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
501 if (ctx->sg->length == ctx->offset) {
502 ctx->sg = sg_next(ctx->sg);
503 if (ctx->sg)
504 ctx->offset = 0;
505 }
506 } else {
507 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
508 DMA_TO_DEVICE);
509 }
438 510
439 return 0; 511 return 0;
440} 512}
@@ -454,14 +526,7 @@ static void omap_sham_cleanup(struct ahash_request *req)
454 spin_unlock_irqrestore(&dd->lock, flags); 526 spin_unlock_irqrestore(&dd->lock, flags);
455 527
456 if (ctx->digcnt) 528 if (ctx->digcnt)
457 clk_disable(dd->iclk); 529 omap_sham_copy_ready_hash(req);
458
459 if (ctx->dma_addr)
460 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
461 DMA_TO_DEVICE);
462
463 if (ctx->buffer)
464 free_page((unsigned long)ctx->buffer);
465 530
466 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); 531 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
467} 532}
@@ -489,8 +554,6 @@ static int omap_sham_init(struct ahash_request *req)
489 554
490 ctx->flags = 0; 555 ctx->flags = 0;
491 556
492 ctx->flags |= FLAGS_FIRST;
493
494 dev_dbg(dd->dev, "init: digest size: %d\n", 557 dev_dbg(dd->dev, "init: digest size: %d\n",
495 crypto_ahash_digestsize(tfm)); 558 crypto_ahash_digestsize(tfm));
496 559
@@ -499,21 +562,7 @@ static int omap_sham_init(struct ahash_request *req)
499 562
500 ctx->bufcnt = 0; 563 ctx->bufcnt = 0;
501 ctx->digcnt = 0; 564 ctx->digcnt = 0;
502 565 ctx->buflen = BUFLEN;
503 ctx->buflen = PAGE_SIZE;
504 ctx->buffer = (void *)__get_free_page(
505 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
506 GFP_KERNEL : GFP_ATOMIC);
507 if (!ctx->buffer)
508 return -ENOMEM;
509
510 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
511 DMA_TO_DEVICE);
512 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
513 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
514 free_page((unsigned long)ctx->buffer);
515 return -EINVAL;
516 }
517 566
518 if (tctx->flags & FLAGS_HMAC) { 567 if (tctx->flags & FLAGS_HMAC) {
519 struct omap_sham_hmac_ctx *bctx = tctx->base; 568 struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -538,10 +587,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
538 587
539 if (ctx->flags & FLAGS_CPU) 588 if (ctx->flags & FLAGS_CPU)
540 err = omap_sham_update_cpu(dd); 589 err = omap_sham_update_cpu(dd);
541 else if (ctx->flags & FLAGS_FAST)
542 err = omap_sham_update_dma_fast(dd);
543 else 590 else
544 err = omap_sham_update_dma_slow(dd); 591 err = omap_sham_update_dma_start(dd);
545 592
546 /* wait for dma completion before can take more data */ 593 /* wait for dma completion before can take more data */
547 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); 594 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -560,15 +607,12 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
560 use_dma = 0; 607 use_dma = 0;
561 608
562 if (use_dma) 609 if (use_dma)
563 err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1); 610 err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
564 else 611 else
565 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); 612 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
566 613
567 ctx->bufcnt = 0; 614 ctx->bufcnt = 0;
568 615
569 if (err != -EINPROGRESS)
570 omap_sham_cleanup(req);
571
572 dev_dbg(dd->dev, "final_req: err: %d\n", err); 616 dev_dbg(dd->dev, "final_req: err: %d\n", err);
573 617
574 return err; 618 return err;
@@ -576,6 +620,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
576 620
577static int omap_sham_finish_req_hmac(struct ahash_request *req) 621static int omap_sham_finish_req_hmac(struct ahash_request *req)
578{ 622{
623 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
579 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 624 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
580 struct omap_sham_hmac_ctx *bctx = tctx->base; 625 struct omap_sham_hmac_ctx *bctx = tctx->base;
581 int bs = crypto_shash_blocksize(bctx->shash); 626 int bs = crypto_shash_blocksize(bctx->shash);
@@ -590,48 +635,56 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
590 635
591 return crypto_shash_init(&desc.shash) ?: 636 return crypto_shash_init(&desc.shash) ?:
592 crypto_shash_update(&desc.shash, bctx->opad, bs) ?: 637 crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
593 crypto_shash_finup(&desc.shash, req->result, ds, req->result); 638 crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
594} 639}
595 640
596static void omap_sham_finish_req(struct ahash_request *req, int err) 641static void omap_sham_finish_req(struct ahash_request *req, int err)
597{ 642{
598 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 643 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
644 struct omap_sham_dev *dd = ctx->dd;
599 645
600 if (!err) { 646 if (!err) {
601 omap_sham_copy_hash(ctx->dd->req, 1); 647 omap_sham_copy_hash(ctx->dd->req, 1);
602 if (ctx->flags & FLAGS_HMAC) 648 if (ctx->flags & FLAGS_HMAC)
603 err = omap_sham_finish_req_hmac(req); 649 err = omap_sham_finish_req_hmac(req);
650 } else {
651 ctx->flags |= FLAGS_ERROR;
604 } 652 }
605 653
606 if (ctx->flags & FLAGS_FINAL) 654 if ((ctx->flags & FLAGS_FINAL) || err)
607 omap_sham_cleanup(req); 655 omap_sham_cleanup(req);
608 656
609 clear_bit(FLAGS_BUSY, &ctx->dd->flags); 657 clk_disable(dd->iclk);
658 dd->flags &= ~FLAGS_BUSY;
610 659
611 if (req->base.complete) 660 if (req->base.complete)
612 req->base.complete(&req->base, err); 661 req->base.complete(&req->base, err);
613} 662}
614 663
615static int omap_sham_handle_queue(struct omap_sham_dev *dd) 664static int omap_sham_handle_queue(struct omap_sham_dev *dd,
665 struct ahash_request *req)
616{ 666{
617 struct crypto_async_request *async_req, *backlog; 667 struct crypto_async_request *async_req, *backlog;
618 struct omap_sham_reqctx *ctx; 668 struct omap_sham_reqctx *ctx;
619 struct ahash_request *req, *prev_req; 669 struct ahash_request *prev_req;
620 unsigned long flags; 670 unsigned long flags;
621 int err = 0; 671 int err = 0, ret = 0;
622
623 if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
624 return 0;
625 672
626 spin_lock_irqsave(&dd->lock, flags); 673 spin_lock_irqsave(&dd->lock, flags);
674 if (req)
675 ret = ahash_enqueue_request(&dd->queue, req);
676 if (dd->flags & FLAGS_BUSY) {
677 spin_unlock_irqrestore(&dd->lock, flags);
678 return ret;
679 }
627 backlog = crypto_get_backlog(&dd->queue); 680 backlog = crypto_get_backlog(&dd->queue);
628 async_req = crypto_dequeue_request(&dd->queue); 681 async_req = crypto_dequeue_request(&dd->queue);
629 if (!async_req) 682 if (async_req)
630 clear_bit(FLAGS_BUSY, &dd->flags); 683 dd->flags |= FLAGS_BUSY;
631 spin_unlock_irqrestore(&dd->lock, flags); 684 spin_unlock_irqrestore(&dd->lock, flags);
632 685
633 if (!async_req) 686 if (!async_req)
634 return 0; 687 return ret;
635 688
636 if (backlog) 689 if (backlog)
637 backlog->complete(backlog, -EINPROGRESS); 690 backlog->complete(backlog, -EINPROGRESS);
@@ -646,7 +699,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
646 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", 699 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
647 ctx->op, req->nbytes); 700 ctx->op, req->nbytes);
648 701
649 if (req != prev_req && ctx->digcnt) 702
703 err = omap_sham_hw_init(dd);
704 if (err)
705 goto err1;
706
707 omap_set_dma_dest_params(dd->dma_lch, 0,
708 OMAP_DMA_AMODE_CONSTANT,
709 dd->phys_base + SHA_REG_DIN(0), 0, 16);
710
711 omap_set_dma_dest_burst_mode(dd->dma_lch,
712 OMAP_DMA_DATA_BURST_16);
713
714 omap_set_dma_src_burst_mode(dd->dma_lch,
715 OMAP_DMA_DATA_BURST_4);
716
717 if (ctx->digcnt)
650 /* request has changed - restore hash */ 718 /* request has changed - restore hash */
651 omap_sham_copy_hash(req, 0); 719 omap_sham_copy_hash(req, 0);
652 720
@@ -658,7 +726,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
658 } else if (ctx->op == OP_FINAL) { 726 } else if (ctx->op == OP_FINAL) {
659 err = omap_sham_final_req(dd); 727 err = omap_sham_final_req(dd);
660 } 728 }
661 729err1:
662 if (err != -EINPROGRESS) { 730 if (err != -EINPROGRESS) {
663 /* done_task will not finish it, so do it here */ 731 /* done_task will not finish it, so do it here */
664 omap_sham_finish_req(req, err); 732 omap_sham_finish_req(req, err);
@@ -667,7 +735,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
667 735
668 dev_dbg(dd->dev, "exit, err: %d\n", err); 736 dev_dbg(dd->dev, "exit, err: %d\n", err);
669 737
670 return err; 738 return ret;
671} 739}
672 740
673static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) 741static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -675,18 +743,10 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
675 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 743 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
676 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 744 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
677 struct omap_sham_dev *dd = tctx->dd; 745 struct omap_sham_dev *dd = tctx->dd;
678 unsigned long flags;
679 int err;
680 746
681 ctx->op = op; 747 ctx->op = op;
682 748
683 spin_lock_irqsave(&dd->lock, flags); 749 return omap_sham_handle_queue(dd, req);
684 err = ahash_enqueue_request(&dd->queue, req);
685 spin_unlock_irqrestore(&dd->lock, flags);
686
687 omap_sham_handle_queue(dd);
688
689 return err;
690} 750}
691 751
692static int omap_sham_update(struct ahash_request *req) 752static int omap_sham_update(struct ahash_request *req)
@@ -709,21 +769,13 @@ static int omap_sham_update(struct ahash_request *req)
709 */ 769 */
710 omap_sham_append_sg(ctx); 770 omap_sham_append_sg(ctx);
711 return 0; 771 return 0;
712 } else if (ctx->bufcnt + ctx->total <= 64) { 772 } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
773 /*
774 * faster to use CPU for short transfers
775 */
713 ctx->flags |= FLAGS_CPU; 776 ctx->flags |= FLAGS_CPU;
714 } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
715 /* may be can use faster functions */
716 int aligned = IS_ALIGNED((u32)ctx->sg->offset,
717 sizeof(u32));
718
719 if (aligned && (ctx->flags & FLAGS_FIRST))
720 /* digest: first and final */
721 ctx->flags |= FLAGS_FAST;
722
723 ctx->flags &= ~FLAGS_FIRST;
724 } 777 }
725 } else if (ctx->bufcnt + ctx->total <= ctx->buflen) { 778 } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
726 /* if not finaup -> not fast */
727 omap_sham_append_sg(ctx); 779 omap_sham_append_sg(ctx);
728 return 0; 780 return 0;
729 } 781 }
@@ -761,12 +813,14 @@ static int omap_sham_final(struct ahash_request *req)
761 813
762 ctx->flags |= FLAGS_FINUP; 814 ctx->flags |= FLAGS_FINUP;
763 815
764 /* OMAP HW accel works only with buffers >= 9 */ 816 if (!(ctx->flags & FLAGS_ERROR)) {
765 /* HMAC is always >= 9 because of ipad */ 817 /* OMAP HW accel works only with buffers >= 9 */
766 if ((ctx->digcnt + ctx->bufcnt) < 9) 818 /* HMAC is always >= 9 because of ipad */
767 err = omap_sham_final_shash(req); 819 if ((ctx->digcnt + ctx->bufcnt) < 9)
768 else if (ctx->bufcnt) 820 err = omap_sham_final_shash(req);
769 return omap_sham_enqueue(req, OP_FINAL); 821 else if (ctx->bufcnt)
822 return omap_sham_enqueue(req, OP_FINAL);
823 }
770 824
771 omap_sham_cleanup(req); 825 omap_sham_cleanup(req);
772 826
@@ -836,6 +890,8 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
836 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); 890 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
837 const char *alg_name = crypto_tfm_alg_name(tfm); 891 const char *alg_name = crypto_tfm_alg_name(tfm);
838 892
893 pr_info("enter\n");
894
839 /* Allocate a fallback and abort if it failed. */ 895 /* Allocate a fallback and abort if it failed. */
840 tctx->fallback = crypto_alloc_shash(alg_name, 0, 896 tctx->fallback = crypto_alloc_shash(alg_name, 0,
841 CRYPTO_ALG_NEED_FALLBACK); 897 CRYPTO_ALG_NEED_FALLBACK);
@@ -846,7 +902,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
846 } 902 }
847 903
848 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 904 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
849 sizeof(struct omap_sham_reqctx)); 905 sizeof(struct omap_sham_reqctx) + BUFLEN);
850 906
851 if (alg_base) { 907 if (alg_base) {
852 struct omap_sham_hmac_ctx *bctx = tctx->base; 908 struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -932,7 +988,7 @@ static struct ahash_alg algs[] = {
932 CRYPTO_ALG_NEED_FALLBACK, 988 CRYPTO_ALG_NEED_FALLBACK,
933 .cra_blocksize = SHA1_BLOCK_SIZE, 989 .cra_blocksize = SHA1_BLOCK_SIZE,
934 .cra_ctxsize = sizeof(struct omap_sham_ctx), 990 .cra_ctxsize = sizeof(struct omap_sham_ctx),
935 .cra_alignmask = 0, 991 .cra_alignmask = OMAP_ALIGN_MASK,
936 .cra_module = THIS_MODULE, 992 .cra_module = THIS_MODULE,
937 .cra_init = omap_sham_cra_init, 993 .cra_init = omap_sham_cra_init,
938 .cra_exit = omap_sham_cra_exit, 994 .cra_exit = omap_sham_cra_exit,
@@ -956,7 +1012,7 @@ static struct ahash_alg algs[] = {
956 .cra_blocksize = SHA1_BLOCK_SIZE, 1012 .cra_blocksize = SHA1_BLOCK_SIZE,
957 .cra_ctxsize = sizeof(struct omap_sham_ctx) + 1013 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
958 sizeof(struct omap_sham_hmac_ctx), 1014 sizeof(struct omap_sham_hmac_ctx),
959 .cra_alignmask = 0, 1015 .cra_alignmask = OMAP_ALIGN_MASK,
960 .cra_module = THIS_MODULE, 1016 .cra_module = THIS_MODULE,
961 .cra_init = omap_sham_cra_sha1_init, 1017 .cra_init = omap_sham_cra_sha1_init,
962 .cra_exit = omap_sham_cra_exit, 1018 .cra_exit = omap_sham_cra_exit,
@@ -980,7 +1036,7 @@ static struct ahash_alg algs[] = {
980 .cra_blocksize = SHA1_BLOCK_SIZE, 1036 .cra_blocksize = SHA1_BLOCK_SIZE,
981 .cra_ctxsize = sizeof(struct omap_sham_ctx) + 1037 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
982 sizeof(struct omap_sham_hmac_ctx), 1038 sizeof(struct omap_sham_hmac_ctx),
983 .cra_alignmask = 0, 1039 .cra_alignmask = OMAP_ALIGN_MASK,
984 .cra_module = THIS_MODULE, 1040 .cra_module = THIS_MODULE,
985 .cra_init = omap_sham_cra_md5_init, 1041 .cra_init = omap_sham_cra_md5_init,
986 .cra_exit = omap_sham_cra_exit, 1042 .cra_exit = omap_sham_cra_exit,
@@ -993,7 +1049,7 @@ static void omap_sham_done_task(unsigned long data)
993 struct omap_sham_dev *dd = (struct omap_sham_dev *)data; 1049 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
994 struct ahash_request *req = dd->req; 1050 struct ahash_request *req = dd->req;
995 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1051 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
996 int ready = 1; 1052 int ready = 0, err = 0;
997 1053
998 if (ctx->flags & FLAGS_OUTPUT_READY) { 1054 if (ctx->flags & FLAGS_OUTPUT_READY) {
999 ctx->flags &= ~FLAGS_OUTPUT_READY; 1055 ctx->flags &= ~FLAGS_OUTPUT_READY;
@@ -1003,15 +1059,18 @@ static void omap_sham_done_task(unsigned long data)
1003 if (dd->flags & FLAGS_DMA_ACTIVE) { 1059 if (dd->flags & FLAGS_DMA_ACTIVE) {
1004 dd->flags &= ~FLAGS_DMA_ACTIVE; 1060 dd->flags &= ~FLAGS_DMA_ACTIVE;
1005 omap_sham_update_dma_stop(dd); 1061 omap_sham_update_dma_stop(dd);
1006 omap_sham_update_dma_slow(dd); 1062 if (!dd->err)
1063 err = omap_sham_update_dma_start(dd);
1007 } 1064 }
1008 1065
1009 if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) { 1066 err = dd->err ? : err;
1010 dev_dbg(dd->dev, "update done\n"); 1067
1068 if (err != -EINPROGRESS && (ready || err)) {
1069 dev_dbg(dd->dev, "update done: err: %d\n", err);
1011 /* finish curent request */ 1070 /* finish curent request */
1012 omap_sham_finish_req(req, 0); 1071 omap_sham_finish_req(req, err);
1013 /* start new request */ 1072 /* start new request */
1014 omap_sham_handle_queue(dd); 1073 omap_sham_handle_queue(dd, NULL);
1015 } 1074 }
1016} 1075}
1017 1076
@@ -1019,7 +1078,7 @@ static void omap_sham_queue_task(unsigned long data)
1019{ 1078{
1020 struct omap_sham_dev *dd = (struct omap_sham_dev *)data; 1079 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1021 1080
1022 omap_sham_handle_queue(dd); 1081 omap_sham_handle_queue(dd, NULL);
1023} 1082}
1024 1083
1025static irqreturn_t omap_sham_irq(int irq, void *dev_id) 1084static irqreturn_t omap_sham_irq(int irq, void *dev_id)
@@ -1041,6 +1100,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
1041 omap_sham_read(dd, SHA_REG_CTRL); 1100 omap_sham_read(dd, SHA_REG_CTRL);
1042 1101
1043 ctx->flags |= FLAGS_OUTPUT_READY; 1102 ctx->flags |= FLAGS_OUTPUT_READY;
1103 dd->err = 0;
1044 tasklet_schedule(&dd->done_task); 1104 tasklet_schedule(&dd->done_task);
1045 1105
1046 return IRQ_HANDLED; 1106 return IRQ_HANDLED;
@@ -1050,8 +1110,13 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
1050{ 1110{
1051 struct omap_sham_dev *dd = data; 1111 struct omap_sham_dev *dd = data;
1052 1112
1053 if (likely(lch == dd->dma_lch)) 1113 if (ch_status != OMAP_DMA_BLOCK_IRQ) {
1054 tasklet_schedule(&dd->done_task); 1114 pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
1115 dd->err = -EIO;
1116 dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
1117 }
1118
1119 tasklet_schedule(&dd->done_task);
1055} 1120}
1056 1121
1057static int omap_sham_dma_init(struct omap_sham_dev *dd) 1122static int omap_sham_dma_init(struct omap_sham_dev *dd)
@@ -1066,15 +1131,6 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd)
1066 dev_err(dd->dev, "Unable to request DMA channel\n"); 1131 dev_err(dd->dev, "Unable to request DMA channel\n");
1067 return err; 1132 return err;
1068 } 1133 }
1069 omap_set_dma_dest_params(dd->dma_lch, 0,
1070 OMAP_DMA_AMODE_CONSTANT,
1071 dd->phys_base + SHA_REG_DIN(0), 0, 16);
1072
1073 omap_set_dma_dest_burst_mode(dd->dma_lch,
1074 OMAP_DMA_DATA_BURST_16);
1075
1076 omap_set_dma_src_burst_mode(dd->dma_lch,
1077 OMAP_DMA_DATA_BURST_4);
1078 1134
1079 return 0; 1135 return 0;
1080} 1136}
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a515baa38f7..db33d300aa23 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -9,6 +9,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -21,7 +22,6 @@
 #include <asm/byteorder.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
-#include "padlock.h"
 
 /*
  * Number of data blocks actually fetched for each xcrypt insn.
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d3a27e0119bc..adf075b6b9a8 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -13,6 +13,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/padlock.h>
 #include <crypto/sha.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -22,13 +23,6 @@
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
 #include <asm/i387.h>
-#include "padlock.h"
-
-#ifdef CONFIG_64BIT
-#define STACK_ALIGN 16
-#else
-#define STACK_ALIGN 4
-#endif
 
 struct padlock_sha_desc {
 	struct shash_desc fallback;
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
deleted file mode 100644
index b728e4518bd1..000000000000
--- a/drivers/crypto/padlock.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Driver for VIA PadLock
- *
- * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#ifndef _CRYPTO_PADLOCK_H
-#define _CRYPTO_PADLOCK_H
-
-#define PADLOCK_ALIGNMENT 16
-
-#define PFX "padlock: "
-
-#define PADLOCK_CRA_PRIORITY 300
-#define PADLOCK_COMPOSITE_PRIORITY 400
-
-#endif /* _CRYPTO_PADLOCK_H */