Diffstat (limited to 'drivers/crypto/omap-sham.c')

 drivers/crypto/omap-sham.c | 374
 1 files changed, 215 insertions(+), 159 deletions(-)

diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a081c7c7d03..2e71123516e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,10 +72,9 @@
 
 #define DEFAULT_TIMEOUT_INTERVAL HZ
 
-#define FLAGS_FIRST 0x0001
 #define FLAGS_FINUP 0x0002
 #define FLAGS_FINAL 0x0004
-#define FLAGS_FAST 0x0008
+#define FLAGS_SG 0x0008
 #define FLAGS_SHA1 0x0010
 #define FLAGS_DMA_ACTIVE 0x0020
 #define FLAGS_OUTPUT_READY 0x0040
@@ -83,13 +82,17 @@
 #define FLAGS_INIT 0x0100
 #define FLAGS_CPU 0x0200
 #define FLAGS_HMAC 0x0400
-
-/* 3rd byte */
-#define FLAGS_BUSY 16
+#define FLAGS_ERROR 0x0800
+#define FLAGS_BUSY 0x1000
 
 #define OP_UPDATE 1
 #define OP_FINAL 2
 
+#define OMAP_ALIGN_MASK (sizeof(u32)-1)
+#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN PAGE_SIZE
+
 struct omap_sham_dev;
 
 struct omap_sham_reqctx {
@@ -97,8 +100,8 @@ struct omap_sham_reqctx {
         unsigned long flags;
         unsigned long op;
 
+        u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
         size_t digcnt;
-        u8 *buffer;
         size_t bufcnt;
         size_t buflen;
         dma_addr_t dma_addr;
@@ -107,6 +110,8 @@ struct omap_sham_reqctx {
         struct scatterlist *sg;
         unsigned int offset; /* offset in current sg */
         unsigned int total; /* total request */
+
+        u8 buffer[0] OMAP_ALIGNED;
 };
 
 struct omap_sham_hmac_ctx {
@@ -136,6 +141,7 @@ struct omap_sham_dev {
         int irq;
         struct clk *iclk;
         spinlock_t lock;
+        int err;
         int dma;
         int dma_lch;
         struct tasklet_struct done_task;
@@ -194,53 +200,68 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 static void omap_sham_copy_hash(struct ahash_request *req, int out)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+        u32 *hash = (u32 *)ctx->digest;
+        int i;
+
+        /* MD5 is almost unused. So copy sha1 size to reduce code */
+        for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+                if (out)
+                        hash[i] = omap_sham_read(ctx->dd,
+                                SHA_REG_DIGEST(i));
+                else
+                        omap_sham_write(ctx->dd,
+                                SHA_REG_DIGEST(i), hash[i]);
+        }
+}
+
+static void omap_sham_copy_ready_hash(struct ahash_request *req)
+{
+        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+        u32 *in = (u32 *)ctx->digest;
         u32 *hash = (u32 *)req->result;
         int i;
 
+        if (!hash)
+                return;
+
         if (likely(ctx->flags & FLAGS_SHA1)) {
                 /* SHA1 results are in big endian */
                 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
-                        if (out)
-                                hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
-                                        SHA_REG_DIGEST(i)));
-                        else
-                                omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-                                        cpu_to_be32(hash[i]));
+                        hash[i] = be32_to_cpu(in[i]);
         } else {
                 /* MD5 results are in little endian */
                 for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
-                        if (out)
-                                hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
-                                        SHA_REG_DIGEST(i)));
-                        else
-                                omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-                                        cpu_to_le32(hash[i]));
+                        hash[i] = le32_to_cpu(in[i]);
         }
 }
 
-static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
-                                int final, int dma)
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
 {
-        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-        u32 val = length << 5, mask;
+        clk_enable(dd->iclk);
 
-        if (unlikely(!ctx->digcnt)) {
+        if (!(dd->flags & FLAGS_INIT)) {
+                omap_sham_write_mask(dd, SHA_REG_MASK,
+                        SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
 
-                clk_enable(dd->iclk);
+                if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+                                SHA_REG_SYSSTATUS_RESETDONE))
+                        return -ETIMEDOUT;
 
-                if (!(dd->flags & FLAGS_INIT)) {
-                        omap_sham_write_mask(dd, SHA_REG_MASK,
-                                SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+                dd->flags |= FLAGS_INIT;
+                dd->err = 0;
+        }
 
-                        if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
-                                        SHA_REG_SYSSTATUS_RESETDONE))
-                                return -ETIMEDOUT;
+        return 0;
+}
 
-                        dd->flags |= FLAGS_INIT;
-                }
-        } else {
+static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+                                int final, int dma)
+{
+        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+        u32 val = length << 5, mask;
+
+        if (likely(ctx->digcnt))
                 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
-        }
 
         omap_sham_write_mask(dd, SHA_REG_MASK,
                 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
@@ -260,29 +281,26 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
                         SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 
         omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
-
-        return 0;
 }
 
 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
                         size_t length, int final)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-        int err, count, len32;
+        int count, len32;
         const u32 *buffer = (const u32 *)buf;
 
         dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
                 ctx->digcnt, length, final);
 
-        err = omap_sham_write_ctrl(dd, length, final, 0);
-        if (err)
-                return err;
+        omap_sham_write_ctrl(dd, length, final, 0);
+
+        /* should be non-zero before next lines to disable clocks later */
+        ctx->digcnt += length;
 
         if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
                 return -ETIMEDOUT;
 
-        ctx->digcnt += length;
-
         if (final)
                 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
 
@@ -298,16 +316,11 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
                         size_t length, int final)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-        int err, len32;
+        int len32;
 
         dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
                 ctx->digcnt, length, final);
 
-        /* flush cache entries related to our page */
-        if (dma_addr == ctx->dma_addr)
-                dma_sync_single_for_device(dd->dev, dma_addr, length,
-                                DMA_TO_DEVICE);
-
         len32 = DIV_ROUND_UP(length, sizeof(u32));
 
         omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
@@ -317,9 +330,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
         omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_addr, 0, 0);
 
-        err = omap_sham_write_ctrl(dd, length, final, 1);
-        if (err)
-                return err;
+        omap_sham_write_ctrl(dd, length, final, 1);
 
         ctx->digcnt += length;
 
@@ -371,15 +382,29 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
         return 0;
 }
 
+static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
+                        struct omap_sham_reqctx *ctx,
+                        size_t length, int final)
+{
+        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+                        DMA_TO_DEVICE);
+        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+                dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+                return -EINVAL;
+        }
+
+        ctx->flags &= ~FLAGS_SG;
+
+        /* next call does not fail... so no unmap in the case of error */
+        return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+}
+
 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
         unsigned int final;
         size_t count;
 
-        if (!ctx->total)
-                return 0;
-
         omap_sham_append_sg(ctx);
 
         final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -390,30 +415,68 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
         if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
                 count = ctx->bufcnt;
                 ctx->bufcnt = 0;
-                return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+                return omap_sham_xmit_dma_map(dd, ctx, count, final);
         }
 
         return 0;
 }
 
-static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+/* Start address alignment */
+#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32)))
+/* SHA1 block size alignment */
+#define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+
+static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-        unsigned int length;
+        unsigned int length, final, tail;
+        struct scatterlist *sg;
 
-        ctx->flags |= FLAGS_FAST;
+        if (!ctx->total)
+                return 0;
+
+        if (ctx->bufcnt || ctx->offset)
+                return omap_sham_update_dma_slow(dd);
+
+        dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+                ctx->digcnt, ctx->bufcnt, ctx->total);
+
+        sg = ctx->sg;
 
-        length = min(ctx->total, sg_dma_len(ctx->sg));
-        ctx->total = length;
+        if (!SG_AA(sg))
+                return omap_sham_update_dma_slow(dd);
+
+        if (!sg_is_last(sg) && !SG_SA(sg))
+                /* size is not SHA1_BLOCK_SIZE aligned */
+                return omap_sham_update_dma_slow(dd);
+
+        length = min(ctx->total, sg->length);
+
+        if (sg_is_last(sg)) {
+                if (!(ctx->flags & FLAGS_FINUP)) {
+                        /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
+                        tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+                        /* without finup() we need one block to close hash */
+                        if (!tail)
+                                tail = SHA1_MD5_BLOCK_SIZE;
+                        length -= tail;
+                }
+        }
 
         if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
                 dev_err(dd->dev, "dma_map_sg error\n");
                 return -EINVAL;
         }
 
+        ctx->flags |= FLAGS_SG;
+
         ctx->total -= length;
+        ctx->offset = length; /* offset where to start slow */
 
-        return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+        final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+        /* next call does not fail... so no unmap in the case of error */
+        return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -433,8 +496,17 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
         struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
         omap_stop_dma(dd->dma_lch);
-        if (ctx->flags & FLAGS_FAST)
+        if (ctx->flags & FLAGS_SG) {
                 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+                if (ctx->sg->length == ctx->offset) {
+                        ctx->sg = sg_next(ctx->sg);
+                        if (ctx->sg)
+                                ctx->offset = 0;
+                }
+        } else {
+                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+                                DMA_TO_DEVICE);
+        }
 
         return 0;
 }
@@ -454,14 +526,7 @@ static void omap_sham_cleanup(struct ahash_request *req)
         spin_unlock_irqrestore(&dd->lock, flags);
 
         if (ctx->digcnt)
-                clk_disable(dd->iclk);
-
-        if (ctx->dma_addr)
-                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
-                                DMA_TO_DEVICE);
-
-        if (ctx->buffer)
-                free_page((unsigned long)ctx->buffer);
+                omap_sham_copy_ready_hash(req);
 
         dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 }
@@ -489,8 +554,6 @@ static int omap_sham_init(struct ahash_request *req)
 
         ctx->flags = 0;
 
-        ctx->flags |= FLAGS_FIRST;
-
         dev_dbg(dd->dev, "init: digest size: %d\n",
                 crypto_ahash_digestsize(tfm));
 
@@ -499,21 +562,7 @@ static int omap_sham_init(struct ahash_request *req)
 
         ctx->bufcnt = 0;
         ctx->digcnt = 0;
-
-        ctx->buflen = PAGE_SIZE;
-        ctx->buffer = (void *)__get_free_page(
-                        (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                        GFP_KERNEL : GFP_ATOMIC);
-        if (!ctx->buffer)
-                return -ENOMEM;
-
-        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
-                        DMA_TO_DEVICE);
-        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
-                dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
-                free_page((unsigned long)ctx->buffer);
-                return -EINVAL;
-        }
+        ctx->buflen = BUFLEN;
 
         if (tctx->flags & FLAGS_HMAC) {
                 struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -538,10 +587,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
 
         if (ctx->flags & FLAGS_CPU)
                 err = omap_sham_update_cpu(dd);
-        else if (ctx->flags & FLAGS_FAST)
-                err = omap_sham_update_dma_fast(dd);
         else
-                err = omap_sham_update_dma_slow(dd);
+                err = omap_sham_update_dma_start(dd);
 
         /* wait for dma completion before can take more data */
         dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -560,15 +607,12 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
                 use_dma = 0;
 
         if (use_dma)
-                err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+                err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
         else
                 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 
         ctx->bufcnt = 0;
 
-        if (err != -EINPROGRESS)
-                omap_sham_cleanup(req);
-
         dev_dbg(dd->dev, "final_req: err: %d\n", err);
 
         return err;
@@ -576,6 +620,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
 
 static int omap_sham_finish_req_hmac(struct ahash_request *req)
 {
+        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
         struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
         struct omap_sham_hmac_ctx *bctx = tctx->base;
         int bs = crypto_shash_blocksize(bctx->shash);
@@ -590,48 +635,56 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
 
         return crypto_shash_init(&desc.shash) ?:
                crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
-               crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+               crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
 }
 
 static void omap_sham_finish_req(struct ahash_request *req, int err)
 {
         struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+        struct omap_sham_dev *dd = ctx->dd;
 
         if (!err) {
                 omap_sham_copy_hash(ctx->dd->req, 1);
                 if (ctx->flags & FLAGS_HMAC)
                         err = omap_sham_finish_req_hmac(req);
+        } else {
+                ctx->flags |= FLAGS_ERROR;
         }
 
-        if (ctx->flags & FLAGS_FINAL)
+        if ((ctx->flags & FLAGS_FINAL) || err)
                 omap_sham_cleanup(req);
 
-        clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+        clk_disable(dd->iclk);
+        dd->flags &= ~FLAGS_BUSY;
 
         if (req->base.complete)
                 req->base.complete(&req->base, err);
 }
 
-static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+static int omap_sham_handle_queue(struct omap_sham_dev *dd,
+                        struct ahash_request *req)
 {
         struct crypto_async_request *async_req, *backlog;
         struct omap_sham_reqctx *ctx;
-        struct ahash_request *req, *prev_req;
+        struct ahash_request *prev_req;
         unsigned long flags;
-        int err = 0;
-
-        if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
-                return 0;
+        int err = 0, ret = 0;
 
         spin_lock_irqsave(&dd->lock, flags);
+        if (req)
+                ret = ahash_enqueue_request(&dd->queue, req);
+        if (dd->flags & FLAGS_BUSY) {
+                spin_unlock_irqrestore(&dd->lock, flags);
+                return ret;
+        }
         backlog = crypto_get_backlog(&dd->queue);
         async_req = crypto_dequeue_request(&dd->queue);
-        if (!async_req)
-                clear_bit(FLAGS_BUSY, &dd->flags);
+        if (async_req)
+                dd->flags |= FLAGS_BUSY;
         spin_unlock_irqrestore(&dd->lock, flags);
 
         if (!async_req)
-                return 0;
+                return ret;
 
         if (backlog)
                 backlog->complete(backlog, -EINPROGRESS);
@@ -646,7 +699,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
         dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
                 ctx->op, req->nbytes);
 
-        if (req != prev_req && ctx->digcnt)
+
+        err = omap_sham_hw_init(dd);
+        if (err)
+                goto err1;
+
+        omap_set_dma_dest_params(dd->dma_lch, 0,
+                        OMAP_DMA_AMODE_CONSTANT,
+                        dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+        omap_set_dma_dest_burst_mode(dd->dma_lch,
+                        OMAP_DMA_DATA_BURST_16);
+
+        omap_set_dma_src_burst_mode(dd->dma_lch,
+                        OMAP_DMA_DATA_BURST_4);
+
+        if (ctx->digcnt)
                 /* request has changed - restore hash */
                 omap_sham_copy_hash(req, 0);
 
@@ -658,7 +726,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
         } else if (ctx->op == OP_FINAL) {
                 err = omap_sham_final_req(dd);
         }
-
+err1:
         if (err != -EINPROGRESS) {
                 /* done_task will not finish it, so do it here */
                 omap_sham_finish_req(req, err);
@@ -667,7 +735,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
 
         dev_dbg(dd->dev, "exit, err: %d\n", err);
 
-        return err;
+        return ret;
 }
 
 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -675,18 +743,10 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
         struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
         struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
         struct omap_sham_dev *dd = tctx->dd;
-        unsigned long flags;
-        int err;
 
         ctx->op = op;
 
-        spin_lock_irqsave(&dd->lock, flags);
-        err = ahash_enqueue_request(&dd->queue, req);
-        spin_unlock_irqrestore(&dd->lock, flags);
-
-        omap_sham_handle_queue(dd);
-
-        return err;
+        return omap_sham_handle_queue(dd, req);
 }
 
 static int omap_sham_update(struct ahash_request *req)
@@ -709,21 +769,13 @@ static int omap_sham_update(struct ahash_request *req)
                          */
                         omap_sham_append_sg(ctx);
                         return 0;
-                } else if (ctx->bufcnt + ctx->total <= 64) {
+                } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+                        /*
+                        * faster to use CPU for short transfers
+                        */
                         ctx->flags |= FLAGS_CPU;
-                } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
-                        /* may be can use faster functions */
-                        int aligned = IS_ALIGNED((u32)ctx->sg->offset,
-                                        sizeof(u32));
-
-                        if (aligned && (ctx->flags & FLAGS_FIRST))
-                                /* digest: first and final */
-                                ctx->flags |= FLAGS_FAST;
-
-                        ctx->flags &= ~FLAGS_FIRST;
                 }
-        } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
-                /* if not finaup -> not fast */
+        } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
                 omap_sham_append_sg(ctx);
                 return 0;
         }
@@ -761,12 +813,14 @@ static int omap_sham_final(struct ahash_request *req)
 
         ctx->flags |= FLAGS_FINUP;
 
-        /* OMAP HW accel works only with buffers >= 9 */
-        /* HMAC is always >= 9 because of ipad */
-        if ((ctx->digcnt + ctx->bufcnt) < 9)
-                err = omap_sham_final_shash(req);
-        else if (ctx->bufcnt)
-                return omap_sham_enqueue(req, OP_FINAL);
+        if (!(ctx->flags & FLAGS_ERROR)) {
+                /* OMAP HW accel works only with buffers >= 9 */
+                /* HMAC is always >= 9 because of ipad */
+                if ((ctx->digcnt + ctx->bufcnt) < 9)
+                        err = omap_sham_final_shash(req);
+                else if (ctx->bufcnt)
+                        return omap_sham_enqueue(req, OP_FINAL);
+        }
 
         omap_sham_cleanup(req);
 
@@ -836,6 +890,8 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
         struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
         const char *alg_name = crypto_tfm_alg_name(tfm);
 
+        pr_info("enter\n");
+
         /* Allocate a fallback and abort if it failed. */
         tctx->fallback = crypto_alloc_shash(alg_name, 0,
                         CRYPTO_ALG_NEED_FALLBACK);
@@ -846,7 +902,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
         }
 
         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-                        sizeof(struct omap_sham_reqctx));
+                        sizeof(struct omap_sham_reqctx) + BUFLEN);
 
         if (alg_base) {
                 struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -932,7 +988,7 @@ static struct ahash_alg algs[] = {
                                 CRYPTO_ALG_NEED_FALLBACK,
                 .cra_blocksize = SHA1_BLOCK_SIZE,
                 .cra_ctxsize = sizeof(struct omap_sham_ctx),
-                .cra_alignmask = 0,
+                .cra_alignmask = OMAP_ALIGN_MASK,
                 .cra_module = THIS_MODULE,
                 .cra_init = omap_sham_cra_init,
                 .cra_exit = omap_sham_cra_exit,
@@ -956,7 +1012,7 @@ static struct ahash_alg algs[] = {
                 .cra_blocksize = SHA1_BLOCK_SIZE,
                 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
                                 sizeof(struct omap_sham_hmac_ctx),
-                .cra_alignmask = 0,
+                .cra_alignmask = OMAP_ALIGN_MASK,
                 .cra_module = THIS_MODULE,
                 .cra_init = omap_sham_cra_sha1_init,
                 .cra_exit = omap_sham_cra_exit,
@@ -980,7 +1036,7 @@ static struct ahash_alg algs[] = {
                 .cra_blocksize = SHA1_BLOCK_SIZE,
                 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
                                 sizeof(struct omap_sham_hmac_ctx),
-                .cra_alignmask = 0,
+                .cra_alignmask = OMAP_ALIGN_MASK,
                 .cra_module = THIS_MODULE,
                 .cra_init = omap_sham_cra_md5_init,
                 .cra_exit = omap_sham_cra_exit,
@@ -993,7 +1049,7 @@ static void omap_sham_done_task(unsigned long data)
         struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
         struct ahash_request *req = dd->req;
         struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-        int ready = 1;
+        int ready = 0, err = 0;
 
         if (ctx->flags & FLAGS_OUTPUT_READY) {
                 ctx->flags &= ~FLAGS_OUTPUT_READY;
@@ -1003,15 +1059,18 @@
         if (dd->flags & FLAGS_DMA_ACTIVE) {
                 dd->flags &= ~FLAGS_DMA_ACTIVE;
                 omap_sham_update_dma_stop(dd);
-                omap_sham_update_dma_slow(dd);
+                if (!dd->err)
+                        err = omap_sham_update_dma_start(dd);
         }
 
-        if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
-                dev_dbg(dd->dev, "update done\n");
+        err = dd->err ? : err;
+
+        if (err != -EINPROGRESS && (ready || err)) {
+                dev_dbg(dd->dev, "update done: err: %d\n", err);
                 /* finish curent request */
-                omap_sham_finish_req(req, 0);
+                omap_sham_finish_req(req, err);
                 /* start new request */
-                omap_sham_handle_queue(dd);
+                omap_sham_handle_queue(dd, NULL);
         }
 }
 
@@ -1019,7 +1078,7 @@ static void omap_sham_queue_task(unsigned long data)
 {
         struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 
-        omap_sham_handle_queue(dd);
+        omap_sham_handle_queue(dd, NULL);
 }
 
 static irqreturn_t omap_sham_irq(int irq, void *dev_id)
@@ -1041,6 +1100,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
         omap_sham_read(dd, SHA_REG_CTRL);
 
         ctx->flags |= FLAGS_OUTPUT_READY;
+        dd->err = 0;
         tasklet_schedule(&dd->done_task);
 
         return IRQ_HANDLED;
@@ -1050,8 +1110,13 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
 {
         struct omap_sham_dev *dd = data;
 
-        if (likely(lch == dd->dma_lch))
-                tasklet_schedule(&dd->done_task);
+        if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+                pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
+                dd->err = -EIO;
+                dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+        }
+
+        tasklet_schedule(&dd->done_task);
 }
 
 static int omap_sham_dma_init(struct omap_sham_dev *dd)
@@ -1066,15 +1131,6 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd)
                 dev_err(dd->dev, "Unable to request DMA channel\n");
                 return err;
         }
-        omap_set_dma_dest_params(dd->dma_lch, 0,
-                        OMAP_DMA_AMODE_CONSTANT,
-                        dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
-        omap_set_dma_dest_burst_mode(dd->dma_lch,
-                        OMAP_DMA_DATA_BURST_16);
-
-        omap_set_dma_src_burst_mode(dd->dma_lch,
-                        OMAP_DMA_DATA_BURST_4);
 
         return 0;
 }
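
For readers skimming the patch, the central structural change is that the per-request staging buffer no longer comes from a separately allocated (and permanently DMA-mapped) page: it now sits at the tail of struct omap_sham_reqctx as a zero-length array, and the extra BUFLEN bytes are reserved through crypto_ahash_set_reqsize(). A minimal stand-alone sketch of that trailing-buffer layout is shown below; it is plain C with malloc() and made-up names (struct reqctx, reqctx_alloc, BUFLEN of 4096) purely for illustration, not the kernel crypto API itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUFLEN 4096 /* stands in for PAGE_SIZE in the driver */

/*
 * The buffer is part of the same allocation as the context, mirroring
 * sizeof(struct omap_sham_reqctx) + BUFLEN being passed to
 * crypto_ahash_set_reqsize() in the patch.
 */
struct reqctx {
        size_t bufcnt;          /* bytes currently staged */
        size_t buflen;          /* capacity of the trailing buffer */
        unsigned char buffer[]; /* flexible array member (the driver uses buffer[0]) */
};

static struct reqctx *reqctx_alloc(void)
{
        struct reqctx *ctx = malloc(sizeof(*ctx) + BUFLEN);

        if (!ctx)
                return NULL;
        ctx->bufcnt = 0;
        ctx->buflen = BUFLEN;
        return ctx;
}

int main(void)
{
        struct reqctx *ctx = reqctx_alloc();

        if (!ctx)
                return 1;
        memcpy(ctx->buffer, "abc", 3); /* data is staged directly in-context */
        ctx->bufcnt = 3;
        printf("buffered %zu of %zu bytes\n", ctx->bufcnt, ctx->buflen);
        free(ctx);
        return 0;
}

Because the buffer is embedded in the request context, the driver can defer dma_map_single() to omap_sham_xmit_dma_map() right before a transfer and drop the map/unmap and free_page() paths that previously lived in omap_sham_init() and omap_sham_cleanup().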