Diffstat (limited to 'drivers/crypto/omap-sham.c')
-rw-r--r--	drivers/crypto/omap-sham.c	418
1 file changed, 234 insertions(+), 184 deletions(-)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7d1485676886..ba8f1ea84c5e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,24 +72,26 @@
 
 #define DEFAULT_TIMEOUT_INTERVAL	HZ
 
-#define FLAGS_FIRST		0x0001
 #define FLAGS_FINUP		0x0002
 #define FLAGS_FINAL		0x0004
-#define FLAGS_FAST		0x0008
+#define FLAGS_SG		0x0008
 #define FLAGS_SHA1		0x0010
 #define FLAGS_DMA_ACTIVE	0x0020
 #define FLAGS_OUTPUT_READY	0x0040
-#define FLAGS_CLEAN		0x0080
 #define FLAGS_INIT		0x0100
 #define FLAGS_CPU		0x0200
 #define FLAGS_HMAC		0x0400
-
-/* 3rd byte */
-#define FLAGS_BUSY		16
+#define FLAGS_ERROR		0x0800
+#define FLAGS_BUSY		0x1000
 
 #define OP_UPDATE	1
 #define OP_FINAL	2
 
+#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
+#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN		PAGE_SIZE
+
 struct omap_sham_dev;
 
 struct omap_sham_reqctx {
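
This first hunk retires FLAGS_FIRST and FLAGS_CLEAN, renames FLAGS_FAST to FLAGS_SG, adds FLAGS_ERROR, and folds FLAGS_BUSY (previously a bare bit number in a separate "3rd byte", used with test_and_set_bit()) into the same mask namespace as the other flags. A minimal standalone C sketch of the mask idiom the driver now uses throughout; the flag values are the ones defined above, everything else is illustrative:

#include <stdio.h>

#define FLAGS_ERROR	0x0800
#define FLAGS_BUSY	0x1000

int main(void)
{
	unsigned long flags = 0;

	flags |= FLAGS_BUSY;		/* claim the device */
	if (!(flags & FLAGS_ERROR))	/* test a status bit */
		printf("no error recorded\n");
	flags &= ~FLAGS_BUSY;		/* release the device */
	return 0;
}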
@@ -97,8 +99,8 @@ struct omap_sham_reqctx {
 	unsigned long	flags;
 	unsigned long	op;
 
+	u8		digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
 	size_t		digcnt;
-	u8		*buffer;
 	size_t		bufcnt;
 	size_t		buflen;
 	dma_addr_t	dma_addr;
@@ -107,6 +109,8 @@ struct omap_sham_reqctx {
 	struct scatterlist	*sg;
 	unsigned int		offset;	/* offset in current sg */
 	unsigned int		total;	/* total request */
+
+	u8			buffer[0] OMAP_ALIGNED;
 };
 
 struct omap_sham_hmac_ctx {
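
Declaring buffer[0] (a GCC zero-length array) at the end of the request context lets the data buffer be co-allocated with the context itself; a later hunk reserves the extra BUFLEN bytes through crypto_ahash_set_reqsize() and drops the old per-request __get_free_page() call. A self-contained userspace model of the same layout, written with a C99 flexible array member and hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUFLEN	4096	/* stands in for PAGE_SIZE */

/* Model of the request context: the buffer lives at the tail of the
 * struct instead of in a separately allocated (and freed) page. */
struct reqctx {
	size_t bufcnt;
	size_t buflen;
	unsigned char buffer[];		/* C99 flexible array member */
};

static struct reqctx *reqctx_alloc(void)
{
	/* one allocation covers struct + buffer, mirroring
	 * crypto_ahash_set_reqsize(tfm, sizeof(ctx) + BUFLEN) */
	struct reqctx *ctx = malloc(sizeof(*ctx) + BUFLEN);

	if (ctx) {
		ctx->bufcnt = 0;
		ctx->buflen = BUFLEN;
	}
	return ctx;
}

int main(void)
{
	struct reqctx *ctx = reqctx_alloc();

	if (!ctx)
		return 1;
	memcpy(ctx->buffer, "data", 4);
	ctx->bufcnt = 4;
	printf("buffered %zu of %zu bytes\n", ctx->bufcnt, ctx->buflen);
	free(ctx);
	return 0;
}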
@@ -136,6 +140,7 @@ struct omap_sham_dev {
 	int			irq;
 	struct clk		*iclk;
 	spinlock_t		lock;
+	int			err;
 	int			dma;
 	int			dma_lch;
 	struct tasklet_struct	done_task;
@@ -194,53 +199,68 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 static void omap_sham_copy_hash(struct ahash_request *req, int out)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)ctx->digest;
+	int i;
+
+	/* MD5 is almost unused. So copy sha1 size to reduce code */
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+		if (out)
+			hash[i] = omap_sham_read(ctx->dd,
+						SHA_REG_DIGEST(i));
+		else
+			omap_sham_write(ctx->dd,
+					SHA_REG_DIGEST(i), hash[i]);
+	}
+}
+
+static void omap_sham_copy_ready_hash(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *in = (u32 *)ctx->digest;
 	u32 *hash = (u32 *)req->result;
 	int i;
 
+	if (!hash)
+		return;
+
 	if (likely(ctx->flags & FLAGS_SHA1)) {
 		/* SHA1 results are in big endian */
 		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
-			if (out)
-				hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
-							SHA_REG_DIGEST(i)));
-			else
-				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-						cpu_to_be32(hash[i]));
+			hash[i] = be32_to_cpu(in[i]);
 	} else {
 		/* MD5 results are in little endian */
 		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
-			if (out)
-				hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
-							SHA_REG_DIGEST(i)));
-			else
-				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-						cpu_to_le32(hash[i]));
+			hash[i] = le32_to_cpu(in[i]);
 	}
 }
 
-static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
-				 int final, int dma)
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
 {
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	u32 val = length << 5, mask;
+	clk_enable(dd->iclk);
 
-	if (unlikely(!ctx->digcnt)) {
+	if (!(dd->flags & FLAGS_INIT)) {
+		omap_sham_write_mask(dd, SHA_REG_MASK,
+			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
 
-		clk_enable(dd->iclk);
+		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+					SHA_REG_SYSSTATUS_RESETDONE))
+			return -ETIMEDOUT;
+
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
+	}
 
-		if (!(dd->flags & FLAGS_INIT)) {
-			omap_sham_write_mask(dd, SHA_REG_MASK,
-				SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+	return 0;
+}
 
-			if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
-						SHA_REG_SYSSTATUS_RESETDONE))
-				return -ETIMEDOUT;
+static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+				 int final, int dma)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	u32 val = length << 5, mask;
 
-			dd->flags |= FLAGS_INIT;
-		}
-	} else {
+	if (likely(ctx->digcnt))
 		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
-	}
 
 	omap_sham_write_mask(dd, SHA_REG_MASK,
 		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
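
The rewrite above splits the two copy directions: omap_sham_copy_hash() now moves raw words between the context and the DIGEST registers in both directions, and the new omap_sham_copy_ready_hash() byte-swaps only when handing a finished digest to the caller (the SHA-1 registers hold big-endian words, MD5 little-endian). A hedged userspace model of the ready-hash copy, substituting glibc's be32toh()/le32toh() for the kernel's be32_to_cpu()/le32_to_cpu():

#include <endian.h>	/* be32toh()/le32toh(), glibc */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SHA1_DIGEST_SIZE 20

/* Swap the raw register words into the byte order the caller expects. */
static void copy_ready_hash(uint8_t *result, const uint8_t *digest, int sha1)
{
	uint32_t w;
	size_t i;

	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(uint32_t); i++) {
		memcpy(&w, digest + i * sizeof(w), sizeof(w));
		w = sha1 ? be32toh(w) : le32toh(w);
		memcpy(result + i * sizeof(w), &w, sizeof(w));
	}
}

int main(void)
{
	uint8_t digest[SHA1_DIGEST_SIZE] = { 0x01, 0x02, 0x03, 0x04 };
	uint8_t out[SHA1_DIGEST_SIZE];

	copy_ready_hash(out, digest, 1);	/* SHA-1: big-endian words */
	printf("%02x%02x%02x%02x...\n", out[0], out[1], out[2], out[3]);
	return 0;
}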
@@ -260,29 +280,26 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
 		SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 
 	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
-
-	return 0;
 }
 
 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 			      size_t length, int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int err, count, len32;
+	int count, len32;
 	const u32 *buffer = (const u32 *)buf;
 
 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
-	err = omap_sham_write_ctrl(dd, length, final, 0);
-	if (err)
-		return err;
+	omap_sham_write_ctrl(dd, length, final, 0);
+
+	/* should be non-zero before next lines to disable clocks later */
+	ctx->digcnt += length;
 
 	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
 		return -ETIMEDOUT;
 
-	ctx->digcnt += length;
-
 	if (final)
 		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
 
@@ -298,27 +315,21 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 			      size_t length, int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int err, len32;
+	int len32;
 
 	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
-	/* flush cache entries related to our page */
-	if (dma_addr == ctx->dma_addr)
-		dma_sync_single_for_device(dd->dev, dma_addr, length,
-					   DMA_TO_DEVICE);
-
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
-		1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+			1, OMAP_DMA_SYNC_PACKET, dd->dma,
+				OMAP_DMA_DST_SYNC_PREFETCH);
 
 	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 				dma_addr, 0, 0);
 
-	err = omap_sham_write_ctrl(dd, length, final, 1);
-	if (err)
-		return err;
+	omap_sham_write_ctrl(dd, length, final, 1);
 
 	ctx->digcnt += length;
 
@@ -370,15 +381,29 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
 	return 0;
 }
 
+static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
+					struct omap_sham_reqctx *ctx,
+					size_t length, int final)
+{
+	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+				       DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+		return -EINVAL;
+	}
+
+	ctx->flags &= ~FLAGS_SG;
+
+	/* next call does not fail... so no unmap in the case of error */
+	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+}
+
 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	unsigned int final;
 	size_t count;
 
-	if (!ctx->total)
-		return 0;
-
 	omap_sham_append_sg(ctx);
 
 	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -389,30 +414,68 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 		count = ctx->bufcnt;
 		ctx->bufcnt = 0;
-		return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+		return omap_sham_xmit_dma_map(dd, ctx, count, final);
 	}
 
 	return 0;
 }
 
-static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+/* Start address alignment */
+#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
+/* SHA1 block size alignment */
+#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+
+static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	unsigned int length;
+	unsigned int length, final, tail;
+	struct scatterlist *sg;
+
+	if (!ctx->total)
+		return 0;
+
+	if (ctx->bufcnt || ctx->offset)
+		return omap_sham_update_dma_slow(dd);
+
+	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+			ctx->digcnt, ctx->bufcnt, ctx->total);
 
-	ctx->flags |= FLAGS_FAST;
+	sg = ctx->sg;
 
-	length = min(ctx->total, sg_dma_len(ctx->sg));
-	ctx->total = length;
+	if (!SG_AA(sg))
+		return omap_sham_update_dma_slow(dd);
+
+	if (!sg_is_last(sg) && !SG_SA(sg))
+		/* size is not SHA1_BLOCK_SIZE aligned */
+		return omap_sham_update_dma_slow(dd);
+
+	length = min(ctx->total, sg->length);
+
+	if (sg_is_last(sg)) {
+		if (!(ctx->flags & FLAGS_FINUP)) {
+			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
+			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+			/* without finup() we need one block to close hash */
+			if (!tail)
+				tail = SHA1_MD5_BLOCK_SIZE;
+			length -= tail;
+		}
+	}
 
 	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 		dev_err(dd->dev, "dma_map_sg error\n");
 		return -EINVAL;
 	}
 
+	ctx->flags |= FLAGS_SG;
+
 	ctx->total -= length;
+	ctx->offset = length; /* offset where to start slow */
+
+	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
 
-	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+	/* next call does not fail... so no unmap in the case of error */
+	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
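
The new fast path only DMAs straight from the scatterlist when the start offset is word-aligned (SG_AA) and every non-final entry is block-aligned (SG_SA); on the last entry without finup() it additionally holds back a tail so that a later final() still has at least one block left to close the hash. A small standalone model of just that tail computation (SHA1_MD5_BLOCK_SIZE is 64 here, as in the driver):

#include <stdio.h>

#define SHA1_MD5_BLOCK_SIZE 64

/* How many bytes of the last sg entry may go out over DMA now. */
static unsigned int dma_length(unsigned int length, int finup)
{
	unsigned int tail;

	if (finup)
		return length;			/* everything may go out */

	tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
	if (!tail)
		tail = SHA1_MD5_BLOCK_SIZE;	/* hold back one full block */
	return length - tail;
}

int main(void)
{
	printf("%u\n", dma_length(200, 0));	/* 192: 8-byte tail kept */
	printf("%u\n", dma_length(192, 0));	/* 128: whole block kept */
	printf("%u\n", dma_length(192, 1));	/* 192: finup sends it all */
	return 0;
}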
@@ -432,37 +495,19 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
 	omap_stop_dma(dd->dma_lch);
-	if (ctx->flags & FLAGS_FAST)
+	if (ctx->flags & FLAGS_SG) {
 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
-
-	return 0;
-}
-
-static void omap_sham_cleanup(struct ahash_request *req)
-{
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	struct omap_sham_dev *dd = ctx->dd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->lock, flags);
-	if (ctx->flags & FLAGS_CLEAN) {
-		spin_unlock_irqrestore(&dd->lock, flags);
-		return;
-	}
-	ctx->flags |= FLAGS_CLEAN;
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (ctx->digcnt)
-		clk_disable(dd->iclk);
-
-	if (ctx->dma_addr)
+		if (ctx->sg->length == ctx->offset) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+		}
+	} else {
 		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
 				 DMA_TO_DEVICE);
+	}
 
-	if (ctx->buffer)
-		free_page((unsigned long)ctx->buffer);
-
-	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+	return 0;
 }
 
 static int omap_sham_init(struct ahash_request *req)
@@ -488,8 +533,6 @@ static int omap_sham_init(struct ahash_request *req)
 
 	ctx->flags = 0;
 
-	ctx->flags |= FLAGS_FIRST;
-
 	dev_dbg(dd->dev, "init: digest size: %d\n",
 		crypto_ahash_digestsize(tfm));
 
@@ -498,21 +541,7 @@ static int omap_sham_init(struct ahash_request *req)
 
 	ctx->bufcnt = 0;
 	ctx->digcnt = 0;
-
-	ctx->buflen = PAGE_SIZE;
-	ctx->buffer = (void *)__get_free_page(
-				(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-				GFP_KERNEL : GFP_ATOMIC);
-	if (!ctx->buffer)
-		return -ENOMEM;
-
-	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
-				       DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
-		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
-		free_page((unsigned long)ctx->buffer);
-		return -EINVAL;
-	}
+	ctx->buflen = BUFLEN;
 
 	if (tctx->flags & FLAGS_HMAC) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -537,10 +566,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
 
 	if (ctx->flags & FLAGS_CPU)
 		err = omap_sham_update_cpu(dd);
-	else if (ctx->flags & FLAGS_FAST)
-		err = omap_sham_update_dma_fast(dd);
 	else
-		err = omap_sham_update_dma_slow(dd);
+		err = omap_sham_update_dma_start(dd);
 
 	/* wait for dma completion before can take more data */
 	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -559,21 +586,18 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
 		use_dma = 0;
 
 	if (use_dma)
-		err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
 	else
 		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 
 	ctx->bufcnt = 0;
 
-	if (err != -EINPROGRESS)
-		omap_sham_cleanup(req);
-
 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
 
 	return err;
 }
 
-static int omap_sham_finish_req_hmac(struct ahash_request *req)
+static int omap_sham_finish_hmac(struct ahash_request *req)
 {
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -592,45 +616,67 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
 	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
 }
 
+static int omap_sham_finish(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
+	int err = 0;
+
+	if (ctx->digcnt) {
+		omap_sham_copy_ready_hash(req);
+		if (ctx->flags & FLAGS_HMAC)
+			err = omap_sham_finish_hmac(req);
+	}
+
+	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+
+	return err;
+}
+
 static void omap_sham_finish_req(struct ahash_request *req, int err)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
 
 	if (!err) {
 		omap_sham_copy_hash(ctx->dd->req, 1);
-		if (ctx->flags & FLAGS_HMAC)
-			err = omap_sham_finish_req_hmac(req);
+		if (ctx->flags & FLAGS_FINAL)
+			err = omap_sham_finish(req);
+	} else {
+		ctx->flags |= FLAGS_ERROR;
 	}
 
-	if (ctx->flags & FLAGS_FINAL)
-		omap_sham_cleanup(req);
-
-	clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+	clk_disable(dd->iclk);
+	dd->flags &= ~FLAGS_BUSY;
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
 }
 
-static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+static int omap_sham_handle_queue(struct omap_sham_dev *dd,
+				  struct ahash_request *req)
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_sham_reqctx *ctx;
-	struct ahash_request *req, *prev_req;
+	struct ahash_request *prev_req;
 	unsigned long flags;
-	int err = 0;
-
-	if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
-		return 0;
+	int err = 0, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->queue, req);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dd->flags);
+	if (async_req)
+		dd->flags |= FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
-		return 0;
+		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
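
omap_sham_handle_queue() now receives the new request itself: enqueueing and dequeueing happen under one spinlock, and the FLAGS_BUSY test replaces the old lock-free test_and_set_bit() gate, so a request arriving while the engine is busy is simply parked and later drained by done_task. A rough pthread-based model of the pattern; the ring buffer, the -115 stand-in for -EINPROGRESS, and all names are illustrative:

#include <pthread.h>
#include <stdio.h>

#define FLAGS_BUSY	0x1000
#define QUEUE_LEN	16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long dev_flags;
static int queue[QUEUE_LEN], head, tail;

/* req < 0 means "no new request, just kick the queue" (the NULL case). */
static int handle_queue(int req)
{
	int next = -1, ret = 0;

	pthread_mutex_lock(&lock);
	if (req >= 0) {
		queue[tail++ % QUEUE_LEN] = req;
		ret = -115;		/* -EINPROGRESS for the caller */
	}
	if (dev_flags & FLAGS_BUSY) {	/* current owner will drain it */
		pthread_mutex_unlock(&lock);
		return ret;
	}
	if (head != tail) {
		next = queue[head++ % QUEUE_LEN];
		dev_flags |= FLAGS_BUSY;	/* claim before unlocking */
	}
	pthread_mutex_unlock(&lock);

	if (next >= 0)
		printf("processing req %d\n", next);
	/* the completion path clears FLAGS_BUSY and calls us again */
	return ret;
}

int main(void)
{
	handle_queue(7);	/* enqueued, then processed immediately */
	handle_queue(8);	/* parked: device still marked busy */
	return 0;
}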
@@ -645,7 +691,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
 						ctx->op, req->nbytes);
 
-	if (req != prev_req && ctx->digcnt)
+
+	err = omap_sham_hw_init(dd);
+	if (err)
+		goto err1;
+
+	omap_set_dma_dest_params(dd->dma_lch, 0,
+			OMAP_DMA_AMODE_CONSTANT,
+			dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_16);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_4);
+
+	if (ctx->digcnt)
 		/* request has changed - restore hash */
 		omap_sham_copy_hash(req, 0);
 
@@ -657,7 +718,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
 	} else if (ctx->op == OP_FINAL) {
 		err = omap_sham_final_req(dd);
 	}
-
+err1:
 	if (err != -EINPROGRESS) {
 		/* done_task will not finish it, so do it here */
 		omap_sham_finish_req(req, err);
@@ -666,7 +727,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
 
 	dev_dbg(dd->dev, "exit, err: %d\n", err);
 
-	return err;
+	return ret;
 }
 
 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -674,18 +735,10 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_dev *dd = tctx->dd;
-	unsigned long flags;
-	int err;
 
 	ctx->op = op;
 
-	spin_lock_irqsave(&dd->lock, flags);
-	err = ahash_enqueue_request(&dd->queue, req);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	omap_sham_handle_queue(dd);
-
-	return err;
+	return omap_sham_handle_queue(dd, req);
 }
 
 static int omap_sham_update(struct ahash_request *req)
@@ -708,21 +761,13 @@ static int omap_sham_update(struct ahash_request *req)
 			*/
 			omap_sham_append_sg(ctx);
 			return 0;
-		} else if (ctx->bufcnt + ctx->total <= 64) {
+		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+			/*
+			* faster to use CPU for short transfers
+			*/
 			ctx->flags |= FLAGS_CPU;
-		} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
-			/* may be can use faster functions */
-			int aligned = IS_ALIGNED((u32)ctx->sg->offset,
-								sizeof(u32));
-
-			if (aligned && (ctx->flags & FLAGS_FIRST))
-				/* digest: first and final */
-				ctx->flags |= FLAGS_FAST;
-
-			ctx->flags &= ~FLAGS_FIRST;
 		}
-	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
-		/* if not finaup -> not fast */
+	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
 		omap_sham_append_sg(ctx);
 		return 0;
 	}
@@ -756,20 +801,21 @@ static int omap_sham_final_shash(struct ahash_request *req)
 static int omap_sham_final(struct ahash_request *req)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	int err = 0;
 
 	ctx->flags |= FLAGS_FINUP;
 
+	if (ctx->flags & FLAGS_ERROR)
+		return 0; /* uncompleted hash is not needed */
+
 	/* OMAP HW accel works only with buffers >= 9 */
-	/* HMAC is always >= 9 because of ipad */
+	/* HMAC is always >= 9 because ipad == block size */
 	if ((ctx->digcnt + ctx->bufcnt) < 9)
-		err = omap_sham_final_shash(req);
+		return omap_sham_final_shash(req);
 	else if (ctx->bufcnt)
 		return omap_sham_enqueue(req, OP_FINAL);
 
-	omap_sham_cleanup(req);
-
-	return err;
+	/* copy ready hash (+ finalize hmac) */
+	return omap_sham_finish(req);
 }
 
 static int omap_sham_finup(struct ahash_request *req)
@@ -780,7 +826,7 @@ static int omap_sham_finup(struct ahash_request *req)
 	ctx->flags |= FLAGS_FINUP;
 
 	err1 = omap_sham_update(req);
-	if (err1 == -EINPROGRESS)
+	if (err1 == -EINPROGRESS || err1 == -EBUSY)
 		return err1;
 	/*
 	 * final() has to be always called to cleanup resources
@@ -845,7 +891,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
 	}
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct omap_sham_reqctx));
+				 sizeof(struct omap_sham_reqctx) + BUFLEN);
 
 	if (alg_base) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -931,7 +977,7 @@ static struct ahash_alg algs[] = {
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -955,7 +1001,7 @@ static struct ahash_alg algs[] = {
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
 					sizeof(struct omap_sham_hmac_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_sha1_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -979,7 +1025,7 @@ static struct ahash_alg algs[] = {
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
 					sizeof(struct omap_sham_hmac_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_md5_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -992,7 +1038,7 @@ static void omap_sham_done_task(unsigned long data)
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 	struct ahash_request *req = dd->req;
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	int ready = 1;
+	int ready = 0, err = 0;
 
 	if (ctx->flags & FLAGS_OUTPUT_READY) {
 		ctx->flags &= ~FLAGS_OUTPUT_READY;
@@ -1002,15 +1048,18 @@ static void omap_sham_done_task(unsigned long data)
 	if (dd->flags & FLAGS_DMA_ACTIVE) {
 		dd->flags &= ~FLAGS_DMA_ACTIVE;
 		omap_sham_update_dma_stop(dd);
-		omap_sham_update_dma_slow(dd);
+		if (!dd->err)
+			err = omap_sham_update_dma_start(dd);
 	}
 
-	if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
-		dev_dbg(dd->dev, "update done\n");
+	err = dd->err ? : err;
+
+	if (err != -EINPROGRESS && (ready || err)) {
+		dev_dbg(dd->dev, "update done: err: %d\n", err);
 		/* finish curent request */
-		omap_sham_finish_req(req, 0);
+		omap_sham_finish_req(req, err);
 		/* start new request */
-		omap_sham_handle_queue(dd);
+		omap_sham_handle_queue(dd, NULL);
 	}
 }
 
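
The line `err = dd->err ? : err;` uses the GNU C conditional with omitted middle operand (the "Elvis" operator): it yields dd->err when that is non-zero (a DMA error recorded by the callback further down) and err otherwise, evaluating dd->err only once. A two-printf demonstration (GCC/Clang extension, not ISO C):

#include <stdio.h>

int main(void)
{
	int dma_err = -5, no_err = 0, err = -115;

	printf("%d\n", dma_err ? : err);	/* -5: the DMA error wins */
	printf("%d\n", no_err ? : err);		/* -115: falls through */
	return 0;
}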
@@ -1018,7 +1067,7 @@ static void omap_sham_queue_task(unsigned long data)
 {
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 
-	omap_sham_handle_queue(dd);
+	omap_sham_handle_queue(dd, NULL);
 }
 
 static irqreturn_t omap_sham_irq(int irq, void *dev_id)
@@ -1040,6 +1089,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
 	omap_sham_read(dd, SHA_REG_CTRL);
 
 	ctx->flags |= FLAGS_OUTPUT_READY;
+	dd->err = 0;
 	tasklet_schedule(&dd->done_task);
 
 	return IRQ_HANDLED;
@@ -1049,8 +1099,13 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
 {
 	struct omap_sham_dev *dd = data;
 
-	if (likely(lch == dd->dma_lch))
-		tasklet_schedule(&dd->done_task);
+	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
+		dd->err = -EIO;
+		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+	}
+
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_sham_dma_init(struct omap_sham_dev *dd)
@@ -1065,12 +1120,6 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd)
 		dev_err(dd->dev, "Unable to request DMA channel\n");
 		return err;
 	}
-	omap_set_dma_dest_params(dd->dma_lch, 0,
-			OMAP_DMA_AMODE_CONSTANT,
-			dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
-	omap_set_dma_dest_burst_mode(dd->dma_lch,
-			OMAP_DMA_DATA_BURST_16);
 
 	return 0;
 }
@@ -1146,9 +1195,9 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
 
 	/* Initializing the clock */
 	dd->iclk = clk_get(dev, "ick");
-	if (!dd->iclk) {
+	if (IS_ERR(dd->iclk)) {
 		dev_err(dev, "clock intialization failed.\n");
-		err = -ENODEV;
+		err = PTR_ERR(dd->iclk);
 		goto clk_err;
 	}
 
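
This fix works because clk_get() never returns NULL on failure: it returns an error code encoded into the pointer value, which the old `if (!dd->iclk)` test could never catch, and the old -ENODEV discarded the real errno. A minimal userspace model of the ERR_PTR()/IS_ERR()/PTR_ERR() encoding the fix relies on; fake_clk_get() is invented for the example:

#include <stdio.h>

/* Error codes live in the top page of the address space, so a returned
 * pointer is either valid or encodes a -errno value. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_clk_get(int fail)
{
	static int clk;

	return fail ? ERR_PTR(-2 /* -ENOENT */) : &clk;
}

int main(void)
{
	void *clk = fake_clk_get(1);

	if (IS_ERR(clk))	/* a NULL check would miss this entirely */
		printf("clk_get failed: %ld\n", PTR_ERR(clk));
	return 0;
}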
@@ -1237,7 +1286,8 @@ static int __init omap_sham_mod_init(void)
 	pr_info("loading %s driver\n", "omap-sham");
 
 	if (!cpu_class_is_omap2() ||
-			omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+			(omap_type() != OMAP2_DEVICE_TYPE_SEC &&
+				omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
 		pr_err("Unsupported cpu\n");
 		return -ENODEV;
 	}