 drivers/crypto/caam/caamalg_qi2.c | 111 +++++++++++++++----------------
 drivers/crypto/caam/caamalg_qi2.h |   2 --
 2 files changed, 45 insertions(+), 68 deletions(-)
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index f3aac3efd51c..844dd9a20486 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -2894,6 +2894,7 @@ struct caam_hash_state {
 	struct caam_request caam_req;
 	dma_addr_t buf_dma;
 	dma_addr_t ctx_dma;
+	int ctx_dma_len;
 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
 	int buflen_0;
 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -2967,6 +2968,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
 				   struct caam_hash_state *state, int ctx_len,
 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
 {
+	state->ctx_dma_len = ctx_len;
 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
 	if (dma_mapping_error(dev, state->ctx_dma)) {
 		dev_err(dev, "unable to map ctx\n");
@@ -3205,14 +3207,12 @@ bad_free_key:
 }
 
 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
-			       struct ahash_request *req, int dst_len)
+			       struct ahash_request *req)
 {
 	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	if (edesc->src_nents)
 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
-	if (edesc->dst_dma)
-		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
 
 	if (edesc->qm_sg_bytes)
 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
@@ -3227,18 +3227,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
 
 static inline void ahash_unmap_ctx(struct device *dev,
 				   struct ahash_edesc *edesc,
-				   struct ahash_request *req, int dst_len,
-				   u32 flag)
+				   struct ahash_request *req, u32 flag)
 {
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	if (state->ctx_dma) {
-		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
 		state->ctx_dma = 0;
 	}
-	ahash_unmap(dev, edesc, req, dst_len);
+	ahash_unmap(dev, edesc, req);
 }
 
 static void ahash_done(void *cbk_ctx, u32 status)
@@ -3259,16 +3256,13 @@ static void ahash_done(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+	memcpy(req->result, state->caam_ctx, digestsize);
 	qi_cache_free(edesc);
 
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-				     digestsize, 1);
 
 	req->base.complete(&req->base, ecode);
 }
@@ -3290,7 +3284,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	switch_buf(state);
 	qi_cache_free(edesc);
 
@@ -3323,16 +3317,13 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+	memcpy(req->result, state->caam_ctx, digestsize);
 	qi_cache_free(edesc);
 
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-				     digestsize, 1);
 
 	req->base.complete(&req->base, ecode);
 }
@@ -3354,7 +3345,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	switch_buf(state);
 	qi_cache_free(edesc);
 
@@ -3492,7 +3483,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3524,7 +3515,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-			       DMA_TO_DEVICE);
+			       DMA_BIDIRECTIONAL);
 	if (ret)
 		goto unmap_ctx;
 
@@ -3543,22 +3534,13 @@ static int ahash_final_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
-		ret = -ENOMEM;
-		goto unmap_ctx;
-	}
-
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3573,7 +3555,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	return ret;
 
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3626,7 +3608,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-			       DMA_TO_DEVICE);
+			       DMA_BIDIRECTIONAL);
 	if (ret)
 		goto unmap_ctx;
 
@@ -3645,22 +3627,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
-		ret = -ENOMEM;
-		goto unmap_ctx;
-	}
-
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3675,7 +3648,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
3675 return ret; 3648 return ret;
3676 3649
3677unmap_ctx: 3650unmap_ctx:
3678 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE); 3651 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3679 qi_cache_free(edesc); 3652 qi_cache_free(edesc);
3680 return ret; 3653 return ret;
3681} 3654}
@@ -3744,18 +3717,19 @@ static int ahash_digest(struct ahash_request *req)
 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
 	}
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		goto unmap;
 	}
 
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_len(in_fle, req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -3769,7 +3743,7 @@ static int ahash_digest(struct ahash_request *req)
 	return ret;
 
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3804,11 +3778,12 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 		}
 	}
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		goto unmap;
 	}
 
@@ -3826,7 +3801,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 		dpaa2_fl_set_len(in_fle, buflen);
 	}
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -3841,7 +3816,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	return ret;
 
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3921,6 +3896,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
+	state->ctx_dma_len = ctx->ctx_len;
 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
 					ctx->ctx_len, DMA_FROM_DEVICE);
 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3969,7 +3945,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -4034,11 +4010,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		ret = -ENOMEM;
 		goto unmap;
 	}
@@ -4049,7 +4026,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -4064,7 +4041,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 
 	return ret;
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return -ENOMEM;
 }
@@ -4151,6 +4128,7 @@ static int ahash_update_first(struct ahash_request *req)
 		scatterwalk_map_and_copy(next_buf, req->src, to_hash,
 					 *next_buflen, 0);
 
+	state->ctx_dma_len = ctx->ctx_len;
 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
 					ctx->ctx_len, DMA_FROM_DEVICE);
 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4194,7 +4172,7 @@ static int ahash_update_first(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -4213,6 +4191,7 @@ static int ahash_init(struct ahash_request *req)
 	state->final = ahash_final_no_ctx;
 
 	state->ctx_dma = 0;
+	state->ctx_dma_len = 0;
 	state->current_buf = 0;
 	state->buf_dma = 0;
 	state->buflen_0 = 0;
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 20890780fb82..be5085451053 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -162,14 +162,12 @@ struct skcipher_edesc {
 
 /*
  * ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: I/O virtual address of req->result
  * @qm_sg_dma: I/O virtual address of h/w link table
  * @src_nents: number of segments in input scatterlist
  * @qm_sg_bytes: length of dma mapped qm_sg space
  * @sgt: pointer to h/w link table
  */
 struct ahash_edesc {
-	dma_addr_t dst_dma;
 	dma_addr_t qm_sg_dma;
 	int src_nents;
 	int qm_sg_bytes;