author		Ruchika Gupta <ruchika.gupta@freescale.com>	2014-06-23 10:20:26 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2014-06-25 09:38:41 -0400
commit		1da2be33ad4c30a2b1d5fe3053b5b7f63e6e2baa (patch)
tree		ae35138a3ce41c356e893589cce3720a6ee0bc5c /drivers/crypto/caam/caamhash.c
parent		ef94b1d834aace7101de77c3a7c2631b9ae9c5f6 (diff)
crypto: caam - Correct the dma mapping for sg table
At a few places in caamhash and caamalg, the DMA-able buffer for the sg
table was being modified after it had already been mapped. Per the
definition of DMA_TO_DEVICE, once the buffer is mapped the memory must
be treated as read-only by the driver. This patch moves the DMA mapping
of the sg table buffer to after the driver has populated it, satisfying
the DMA API's requirement.

Signed-off-by: Ruchika Gupta <ruchika.gupta@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
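The ordering rule behind the fix can be shown with a minimal sketch
(a hypothetical helper, not code from this patch; memset stands in for
the driver's real sg-table writes): with DMA_TO_DEVICE, dma_map_single()
hands ownership of the buffer to the device, possibly flushing caches at
that point, so all CPU writes must happen before the mapping call.

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	/*
	 * Hypothetical illustration of the ordering rule this patch
	 * enforces. With DMA_TO_DEVICE, the driver must not write to
	 * the buffer once it is mapped: populate first, then map.
	 */
	static int populate_then_map(struct device *dev, void *sg_table,
				     size_t len, dma_addr_t *dma)
	{
		/* 1. Populate the table while the CPU still owns it. */
		memset(sg_table, 0, len);

		/* 2. Only now hand the buffer to the device. */
		*dma = dma_map_single(dev, sg_table, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma))
			return -ENOMEM;

		return 0;
	}

The pre-patch code did the reverse (map, then write). On non-coherent
systems that can leave dirty cache lines that are never flushed, so the
device may read stale or partially written sg entries.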
Diffstat (limited to 'drivers/crypto/caam/caamhash.c')
-rw-r--r--	drivers/crypto/caam/caamhash.c	40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0d9284ef96a8..2ab057b0a6d2 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -808,9 +808,6 @@ static int ahash_update_ctx(struct ahash_request *req)
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						    sec4_sg_bytes,
-						    DMA_TO_DEVICE);
 
 		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
 				   edesc->sec4_sg, DMA_BIDIRECTIONAL);
@@ -839,6 +836,10 @@ static int ahash_update_ctx(struct ahash_request *req)
 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
 				     HDR_REVERSE);
 
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+
 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
 				  to_hash, LDST_SGF);
 
@@ -911,8 +912,6 @@ static int ahash_final_ctx(struct ahash_request *req)
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	edesc->src_nents = 0;
 
 	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
@@ -923,6 +922,9 @@ static int ahash_final_ctx(struct ahash_request *req)
 						last_buflen);
 	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
 			  LDST_SGF);
 
@@ -989,8 +991,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 
 	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
 			   DMA_TO_DEVICE);
@@ -1002,6 +1002,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
 			   sec4_sg_src_index, chained);
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
 			  buflen + req->nbytes, LDST_SGF);
 
@@ -1056,8 +1059,6 @@ static int ahash_digest(struct ahash_request *req)
 	}
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			  DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 	edesc->src_nents = src_nents;
 	edesc->chained = chained;
 
@@ -1067,6 +1068,8 @@ static int ahash_digest(struct ahash_request *req)
 
 	if (src_nents) {
 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes, DMA_TO_DEVICE);
 		src_dma = edesc->sec4_sg_dma;
 		options = LDST_SGF;
 	} else {
@@ -1197,9 +1200,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						    sec4_sg_bytes,
-						    DMA_TO_DEVICE);
 
 		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
 						    buf, *buflen);
@@ -1216,6 +1216,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
 				     HDR_REVERSE);
 
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+
 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
 
 		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
@@ -1297,8 +1301,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
 						state->buf_dma, buflen,
@@ -1307,6 +1309,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
 			   chained);
 
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
 			  req->nbytes, LDST_SGF);
 
@@ -1380,13 +1385,14 @@ static int ahash_update_first(struct ahash_request *req)
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-						    sec4_sg_bytes,
-						    DMA_TO_DEVICE);
 
 		if (src_nents) {
 			sg_to_sec4_sg_last(req->src, src_nents,
 					   edesc->sec4_sg, 0);
+			edesc->sec4_sg_dma = dma_map_single(jrdev,
+							    edesc->sec4_sg,
+							    sec4_sg_bytes,
+							    DMA_TO_DEVICE);
 			src_dma = edesc->sec4_sg_dma;
 			options = LDST_SGF;
 		} else {