aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/crypto/qce
diff options
context:
space:
mode:
authorLABBE Corentin <clabbe.montjoie@gmail.com>2015-10-02 02:01:02 -0400
committerHerbert Xu <herbert@gondor.apana.org.au>2015-10-08 09:42:19 -0400
commitfea40451530fea9ed3efd31eb932d1855a3a6e2d (patch)
tree3cfadb25a90928521f91d91859197261172e7612 /drivers/crypto/qce
parent166db195536f380c4545a8d2fca9789402464bc8 (diff)
crypto: qce - dma_map_sg can handle chained SG
The qce driver uses two dma_map_sg paths depending on whether the SG list is chained or not. Since dma_map_sg can handle both cases, clean up the code by removing all references to SG chaining, thus removing the qce_mapsg, qce_unmapsg and qce_countsg functions. Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/qce')
-rw-r--r--drivers/crypto/qce/ablkcipher.c30
-rw-r--r--drivers/crypto/qce/cipher.h4
-rw-r--r--drivers/crypto/qce/dma.c52
-rw-r--r--drivers/crypto/qce/dma.h5
-rw-r--r--drivers/crypto/qce/sha.c18
-rw-r--r--drivers/crypto/qce/sha.h2
6 files changed, 17 insertions, 94 deletions
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ad592de475a4..2c0d63d48747 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -44,10 +44,8 @@ static void qce_ablkcipher_done(void *data)
44 error); 44 error);
45 45
46 if (diff_dst) 46 if (diff_dst)
47 qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src, 47 dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
48 rctx->dst_chained); 48 dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
49 qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
50 rctx->dst_chained);
51 49
52 sg_free_table(&rctx->dst_tbl); 50 sg_free_table(&rctx->dst_tbl);
53 51
@@ -80,15 +78,11 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
80 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; 78 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
81 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; 79 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
82 80
83 rctx->src_nents = qce_countsg(req->src, req->nbytes, 81 rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
84 &rctx->src_chained); 82 if (diff_dst)
85 if (diff_dst) { 83 rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
86 rctx->dst_nents = qce_countsg(req->dst, req->nbytes, 84 else
87 &rctx->dst_chained);
88 } else {
89 rctx->dst_nents = rctx->src_nents; 85 rctx->dst_nents = rctx->src_nents;
90 rctx->dst_chained = rctx->src_chained;
91 }
92 86
93 rctx->dst_nents += 1; 87 rctx->dst_nents += 1;
94 88
@@ -116,14 +110,12 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
116 sg_mark_end(sg); 110 sg_mark_end(sg);
117 rctx->dst_sg = rctx->dst_tbl.sgl; 111 rctx->dst_sg = rctx->dst_tbl.sgl;
118 112
119 ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, 113 ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
120 rctx->dst_chained);
121 if (ret < 0) 114 if (ret < 0)
122 goto error_free; 115 goto error_free;
123 116
124 if (diff_dst) { 117 if (diff_dst) {
125 ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src, 118 ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
126 rctx->src_chained);
127 if (ret < 0) 119 if (ret < 0)
128 goto error_unmap_dst; 120 goto error_unmap_dst;
129 rctx->src_sg = req->src; 121 rctx->src_sg = req->src;
@@ -149,11 +141,9 @@ error_terminate:
149 qce_dma_terminate_all(&qce->dma); 141 qce_dma_terminate_all(&qce->dma);
150error_unmap_src: 142error_unmap_src:
151 if (diff_dst) 143 if (diff_dst)
152 qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src, 144 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
153 rctx->src_chained);
154error_unmap_dst: 145error_unmap_dst:
155 qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, 146 dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
156 rctx->dst_chained);
157error_free: 147error_free:
158 sg_free_table(&rctx->dst_tbl); 148 sg_free_table(&rctx->dst_tbl);
159 return ret; 149 return ret;
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index d5757cfcda2d..5c6a5f8633e5 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -32,8 +32,6 @@ struct qce_cipher_ctx {
32 * @ivsize: IV size 32 * @ivsize: IV size
33 * @src_nents: source entries 33 * @src_nents: source entries
34 * @dst_nents: destination entries 34 * @dst_nents: destination entries
35 * @src_chained: is source chained
36 * @dst_chained: is destination chained
37 * @result_sg: scatterlist used for result buffer 35 * @result_sg: scatterlist used for result buffer
38 * @dst_tbl: destination sg table 36 * @dst_tbl: destination sg table
39 * @dst_sg: destination sg pointer table beginning 37 * @dst_sg: destination sg pointer table beginning
@@ -47,8 +45,6 @@ struct qce_cipher_reqctx {
47 unsigned int ivsize; 45 unsigned int ivsize;
48 int src_nents; 46 int src_nents;
49 int dst_nents; 47 int dst_nents;
50 bool src_chained;
51 bool dst_chained;
52 struct scatterlist result_sg; 48 struct scatterlist result_sg;
53 struct sg_table dst_tbl; 49 struct sg_table dst_tbl;
54 struct scatterlist *dst_sg; 50 struct scatterlist *dst_sg;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 378cb768647f..4797e795c9b9 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -54,58 +54,6 @@ void qce_dma_release(struct qce_dma_data *dma)
54 kfree(dma->result_buf); 54 kfree(dma->result_buf);
55} 55}
56 56
57int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
58 enum dma_data_direction dir, bool chained)
59{
60 int err;
61
62 if (chained) {
63 while (sg) {
64 err = dma_map_sg(dev, sg, 1, dir);
65 if (!err)
66 return -EFAULT;
67 sg = sg_next(sg);
68 }
69 } else {
70 err = dma_map_sg(dev, sg, nents, dir);
71 if (!err)
72 return -EFAULT;
73 }
74
75 return nents;
76}
77
78void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
79 enum dma_data_direction dir, bool chained)
80{
81 if (chained)
82 while (sg) {
83 dma_unmap_sg(dev, sg, 1, dir);
84 sg = sg_next(sg);
85 }
86 else
87 dma_unmap_sg(dev, sg, nents, dir);
88}
89
90int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
91{
92 struct scatterlist *sg = sglist;
93 int nents = 0;
94
95 if (chained)
96 *chained = false;
97
98 while (nbytes > 0 && sg) {
99 nents++;
100 nbytes -= sg->length;
101 if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
102 *chained = true;
103 sg = sg_next(sg);
104 }
105
106 return nents;
107}
108
109struct scatterlist * 57struct scatterlist *
110qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) 58qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
111{ 59{
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 65bedb81de0b..130235d17bb4 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -49,11 +49,6 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
49 dma_async_tx_callback cb, void *cb_param); 49 dma_async_tx_callback cb, void *cb_param);
50void qce_dma_issue_pending(struct qce_dma_data *dma); 50void qce_dma_issue_pending(struct qce_dma_data *dma);
51int qce_dma_terminate_all(struct qce_dma_data *dma); 51int qce_dma_terminate_all(struct qce_dma_data *dma);
52int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
53void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
54 enum dma_data_direction dir, bool chained);
55int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
56 enum dma_data_direction dir, bool chained);
57struct scatterlist * 52struct scatterlist *
58qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); 53qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
59 54
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index be2f5049256a..0c9973ec80eb 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
51 if (error) 51 if (error)
52 dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); 52 dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
53 53
54 qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, 54 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
55 rctx->src_chained); 55 dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
56 qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
57 56
58 memcpy(rctx->digest, result->auth_iv, digestsize); 57 memcpy(rctx->digest, result->auth_iv, digestsize);
59 if (req->result) 58 if (req->result)
@@ -92,16 +91,14 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
92 rctx->authklen = AES_KEYSIZE_128; 91 rctx->authklen = AES_KEYSIZE_128;
93 } 92 }
94 93
95 rctx->src_nents = qce_countsg(req->src, req->nbytes, 94 rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
96 &rctx->src_chained); 95 ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
97 ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
98 rctx->src_chained);
99 if (ret < 0) 96 if (ret < 0)
100 return ret; 97 return ret;
101 98
102 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); 99 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
103 100
104 ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); 101 ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
105 if (ret < 0) 102 if (ret < 0)
106 goto error_unmap_src; 103 goto error_unmap_src;
107 104
@@ -121,10 +118,9 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
121error_terminate: 118error_terminate:
122 qce_dma_terminate_all(&qce->dma); 119 qce_dma_terminate_all(&qce->dma);
123error_unmap_dst: 120error_unmap_dst:
124 qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); 121 dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
125error_unmap_src: 122error_unmap_src:
126 qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, 123 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
127 rctx->src_chained);
128 return ret; 124 return ret;
129} 125}
130 126
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
index 286f0d5397f3..236bb5e9ae75 100644
--- a/drivers/crypto/qce/sha.h
+++ b/drivers/crypto/qce/sha.h
@@ -36,7 +36,6 @@ struct qce_sha_ctx {
36 * @flags: operation flags 36 * @flags: operation flags
37 * @src_orig: original request sg list 37 * @src_orig: original request sg list
38 * @nbytes_orig: original request number of bytes 38 * @nbytes_orig: original request number of bytes
39 * @src_chained: is source scatterlist chained
40 * @src_nents: source number of entries 39 * @src_nents: source number of entries
41 * @byte_count: byte count 40 * @byte_count: byte count
42 * @count: save count in states during update, import and export 41 * @count: save count in states during update, import and export
@@ -55,7 +54,6 @@ struct qce_sha_reqctx {
55 unsigned long flags; 54 unsigned long flags;
56 struct scatterlist *src_orig; 55 struct scatterlist *src_orig;
57 unsigned int nbytes_orig; 56 unsigned int nbytes_orig;
58 bool src_chained;
59 int src_nents; 57 int src_nents;
60 __be32 byte_count[2]; 58 __be32 byte_count[2];
61 u64 count; 59 u64 count;