author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/crypto/mv_cesa.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/crypto/mv_cesa.c')
-rw-r--r--	drivers/crypto/mv_cesa.c	99
1 file changed, 44 insertions, 55 deletions
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 7d279e578df5..3cf303ee3fe3 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -133,7 +133,6 @@ struct mv_req_hash_ctx {
 	int extra_bytes;	/* unprocessed bytes in buffer */
 	enum hash_op op;
 	int count_add;
-	struct scatterlist dummysg;
 };
 
 static void compute_aes_dec_key(struct mv_ctx *ctx)
@@ -187,9 +186,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
 	int ret;
 	void *sbuf;
-	int copied = 0;
+	int copy_len;
 
-	while (1) {
+	while (len) {
 		if (!p->sg_src_left) {
 			ret = sg_miter_next(&p->src_sg_it);
 			BUG_ON(!ret);
@@ -199,19 +198,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 
 		sbuf = p->src_sg_it.addr + p->src_start;
 
-		if (p->sg_src_left <= len - copied) {
-			memcpy(dbuf + copied, sbuf, p->sg_src_left);
-			copied += p->sg_src_left;
-			p->sg_src_left = 0;
-			if (copied >= len)
-				break;
-		} else {
-			int copy_len = len - copied;
-			memcpy(dbuf + copied, sbuf, copy_len);
-			p->src_start += copy_len;
-			p->sg_src_left -= copy_len;
-			break;
-		}
+		copy_len = min(p->sg_src_left, len);
+		memcpy(dbuf, sbuf, copy_len);
+
+		p->src_start += copy_len;
+		p->sg_src_left -= copy_len;
+
+		len -= copy_len;
+		dbuf += copy_len;
 	}
 }
 
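The rewrite above collapses the old two-branch copy into a single min()-bounded step per scatterlist segment. A minimal standalone C sketch of the same pattern, with a hypothetical struct seg standing in for the driver's sg_miter state:

#include <string.h>

/* Hypothetical stand-in for the driver's scatterlist iterator state. */
struct seg {
	const char *buf;	/* current position in this segment */
	int left;		/* bytes remaining in this segment */
};

/* Drain segments into dbuf one min()-sized chunk at a time -- the
 * same shape as the reworked copy_src_to_buf() loop. */
static void copy_segs(struct seg *s, char *dbuf, int len)
{
	while (len) {
		int copy_len = s->left < len ? s->left : len;

		memcpy(dbuf, s->buf, copy_len);
		s->buf += copy_len;
		s->left -= copy_len;

		dbuf += copy_len;
		len -= copy_len;

		if (!s->left)
			s++;	/* next segment, like sg_miter_next() */
	}
}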
@@ -275,7 +269,6 @@ static void mv_process_current_q(int first_block)
 	memcpy(cpg->sram + SRAM_CONFIG, &op,
 			sizeof(struct sec_accel_config));
 
-	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 	/* GO */
 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -302,6 +295,7 @@ static void mv_crypto_algo_completion(void)
 static void mv_process_hash_current(int first_block)
 {
 	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
 	struct req_progress *p = &cpg->p;
 	struct sec_accel_config op = { 0 };
@@ -314,6 +308,8 @@ static void mv_process_hash_current(int first_block)
 		break;
 	case COP_HMAC_SHA1:
 		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
+				tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
 		break;
 	}
 
@@ -345,11 +341,16 @@ static void mv_process_hash_current(int first_block)
 			op.config |= CFG_LAST_FRAG;
 		else
 			op.config |= CFG_MID_FRAG;
+
+		writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+		writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+		writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+		writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+		writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
 	}
 
 	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
 
-	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 	/* GO */
 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -409,12 +410,6 @@ static void mv_hash_algo_completion(void)
 		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
 	sg_miter_stop(&cpg->p.src_sg_it);
 
-	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
-	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
-	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
-	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
-	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-
 	if (likely(ctx->last_chunk)) {
 		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
 			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
@@ -422,6 +417,12 @@ static void mv_hash_algo_completion(void)
 					       (req)));
 		} else
 			mv_hash_final_fallback(req);
+	} else {
+		ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+		ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+		ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+		ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+		ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
 	}
 }
 
@@ -480,7 +481,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 	int i = 0;
 	size_t cur_len;
 
-	while (1) {
+	while (sl) {
 		cur_len = sl[i].length;
 		++i;
 		if (total_bytes > cur_len)
@@ -517,29 +518,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 {
 	struct req_progress *p = &cpg->p;
 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 	int num_sgs, hw_bytes, old_extra_bytes, rc;
 	cpg->cur_req = &req->base;
 	memset(p, 0, sizeof(struct req_progress));
 	hw_bytes = req->nbytes + ctx->extra_bytes;
 	old_extra_bytes = ctx->extra_bytes;
 
-	if (unlikely(ctx->extra_bytes)) {
-		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
-		       ctx->extra_bytes);
-		p->crypt_len = ctx->extra_bytes;
-	}
-
-	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
-
-	if (unlikely(!ctx->first_hash)) {
-		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
-	}
-
 	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
 	if (ctx->extra_bytes != 0
 	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
@@ -555,6 +539,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 		p->complete = mv_hash_algo_completion;
 		p->process = mv_process_hash_current;
 
+		if (unlikely(old_extra_bytes)) {
+			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+			       old_extra_bytes);
+			p->crypt_len = old_extra_bytes;
+		}
+
 		mv_process_hash_current(1);
 	} else {
 		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
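The bookkeeping above splits each update into whole hardware blocks plus a buffered tail. A minimal C sketch of that split, ignoring the last-chunk special case and using hypothetical names:

#define BLOCK_SIZE 64	/* SHA1_BLOCK_SIZE */

/* Hypothetical mirror of the partial-block bookkeeping. */
struct split_ctx {
	int extra_bytes;	/* unprocessed bytes held in ctx->buffer */
};

/* Previously buffered bytes are prepended, and any non-block-multiple
 * tail is held back for the next update, as in mv_start_new_hash_req().
 * Returns the number of bytes the engine should process now. */
static int split_request(struct split_ctx *ctx, int nbytes)
{
	int hw_bytes = nbytes + ctx->extra_bytes;

	ctx->extra_bytes = hw_bytes % BLOCK_SIZE;
	return hw_bytes - ctx->extra_bytes;	/* whole blocks only */
}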
@@ -603,9 +593,7 @@ static int queue_manag(void *data)
 			if (async_req->tfm->__crt_alg->cra_type !=
 			    &crypto_ahash_type) {
 				struct ablkcipher_request *req =
-				    container_of(async_req,
-						 struct ablkcipher_request,
-						 base);
+				    ablkcipher_request_cast(async_req);
 				mv_start_new_crypt_req(req);
 			} else {
 				struct ahash_request *req =
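ablkcipher_request_cast() wraps the open-coded container_of() that the old code spelled out. The general shape of such a cast helper, sketched standalone with illustrative struct names rather than the crypto API's definitions:

#include <stddef.h>

/* container_of() as in the kernel: recover the enclosing structure
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative types only. */
struct base_request { int flags; };
struct cipher_request { struct base_request base; int mode; };

/* A typed cast helper in the style of ablkcipher_request_cast(). */
static inline struct cipher_request *
cipher_request_cast(struct base_request *req)
{
	return container_of(req, struct cipher_request, base);
}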
@@ -722,19 +710,13 @@ static int mv_hash_update(struct ahash_request *req)
 static int mv_hash_final(struct ahash_request *req)
 {
 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-	/* dummy buffer of 4 bytes */
-	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
-	/* I think I'm allowed to do that... */
-	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+
 	mv_update_hash_req_ctx(ctx, 1, 0);
 	return mv_handle_req(&req->base);
 }
 
 static int mv_hash_finup(struct ahash_request *req)
 {
-	if (!req->nbytes)
-		return mv_hash_final(req);
-
 	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
 	return mv_handle_req(&req->base);
 }
@@ -857,7 +839,7 @@ static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
 			printk(KERN_WARNING MV_CESA
 			       "Base driver '%s' could not be loaded!\n",
 			       base_hash_name);
-			err = PTR_ERR(fallback_tfm);
+			err = PTR_ERR(base_hash);
 			goto err_bad_base;
 		}
 	}
@@ -1065,14 +1047,21 @@ static int mv_probe(struct platform_device *pdev)
 
 	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
 	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 
 	ret = crypto_register_alg(&mv_aes_alg_ecb);
-	if (ret)
+	if (ret) {
+		printk(KERN_WARNING MV_CESA
+		       "Could not register aes-ecb driver\n");
 		goto err_irq;
+	}
 
 	ret = crypto_register_alg(&mv_aes_alg_cbc);
-	if (ret)
+	if (ret) {
+		printk(KERN_WARNING MV_CESA
+		       "Could not register aes-cbc driver\n");
 		goto err_unreg_ecb;
+	}
 
 	ret = crypto_register_ahash(&mv_sha1_alg);
 	if (ret == 0)
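The probe hunk follows the usual register-then-unwind pattern: each failure logs a warning and backs out everything registered so far via cascading labels. A self-contained C sketch with hypothetical stand-ins for the crypto_register_*() calls:

#include <stdio.h>

/* Hypothetical stand-ins for the registration calls. */
static int register_ecb(void) { return 0; }
static int register_cbc(void) { return 0; }
static void unregister_ecb(void) { }

/* Warn on each failure and unwind earlier registrations. */
static int demo_probe(void)
{
	int ret;

	ret = register_ecb();
	if (ret) {
		fprintf(stderr, "Could not register aes-ecb driver\n");
		goto err;
	}

	ret = register_cbc();
	if (ret) {
		fprintf(stderr, "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}
	return 0;

err_unreg_ecb:
	unregister_ecb();
err:
	return ret;
}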