author     Stanimir Varbanov <svarbanov@mm-sol.com>   2014-06-25 12:28:57 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>   2014-07-03 09:40:27 -0400
commit     ec8f5d8f6f76b939f662d6e83041abecabef0a34 (patch)
tree       507b8aff2cec7d3f5b2a4b1f94aa3da9a2d3fd04
parent     002c77a48b479b094b834b02ef78be47ceac76fd (diff)
crypto: qce - Qualcomm crypto engine driver
The driver is separated into functional parts:

- The core part implements the platform driver probe and remove callbacks.
  The probe enables the clocks, checks the crypto version, initializes and
  requests the DMA channels, creates the done tasklet, initializes the
  crypto queue and finally registers the algorithms with the crypto core
  subsystem.

- DMA and SG helper functions implement the dmaengine and sg-list helpers
  used by the other parts of the crypto driver.

- The ablkcipher part implements the AES, DES and 3DES crypto API
  callbacks, the crypto alg registration, the async request handler and
  its DMA done callback function.

- The SHA and HMAC part implements and registers transforms of the ahash
  crypto type. It includes sha1, sha256, hmac(sha1) and hmac(sha256).

- The infrastructure to set up the crypto hardware contains functions used
  to set up/prepare hardware registers for all algorithms supported by the
  crypto block. It also exports a few helper functions needed by the
  algorithms:
    - to check the hardware status
    - to start the crypto hardware
    - to translate a data stream to big endian form

Register addresses and bit/masks used by the driver are added as well.

Signed-off-by: Stanimir Varbanov <svarbanov@mm-sol.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--  drivers/crypto/qce/ablkcipher.c  431
-rw-r--r--  drivers/crypto/qce/cipher.h       68
-rw-r--r--  drivers/crypto/qce/common.c      435
-rw-r--r--  drivers/crypto/qce/common.h      102
-rw-r--r--  drivers/crypto/qce/core.c        286
-rw-r--r--  drivers/crypto/qce/core.h         68
-rw-r--r--  drivers/crypto/qce/dma.c         186
-rw-r--r--  drivers/crypto/qce/dma.h          58
-rw-r--r--  drivers/crypto/qce/regs-v5.h     334
-rw-r--r--  drivers/crypto/qce/sha.c         588
-rw-r--r--  drivers/crypto/qce/sha.h          81
11 files changed, 2637 insertions, 0 deletions
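For orientation before the diffs: once "ecb-aes-qce", "cbc-aes-qce" and friends are
registered (at priority 300), any kernel user reaches them through the generic
ablkcipher API of this kernel generation. The sketch below is illustrative only and
not part of the patch; the qce_demo_* names and the synchronous wait wrapper are
assumptions, while the crypto API calls themselves are the standard ones of that era.

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/aes.h>

struct qce_demo_result {
	struct completion completion;
	int err;
};

static void qce_demo_complete(struct crypto_async_request *req, int err)
{
	struct qce_demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* Encrypt @len bytes in place; @buf must be DMA-able (e.g. kmalloc'ed),
 * @key is 16 bytes, @iv is AES_BLOCK_SIZE bytes. */
static int qce_demo_cbc_aes(u8 *buf, unsigned int len, const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct qce_demo_result res;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	init_completion(&res.completion);
	sg_init_one(&sg, buf, len);

	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					qce_demo_complete, &res);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* the qce driver completes requests from its done tasklet */
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}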
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
new file mode 100644
index 000000000000..ad592de475a4
--- /dev/null
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -0,0 +1,431 @@
1/*
2 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/device.h>
15#include <linux/interrupt.h>
16#include <linux/types.h>
17#include <crypto/aes.h>
18#include <crypto/algapi.h>
19#include <crypto/des.h>
20
21#include "cipher.h"
22
23static LIST_HEAD(ablkcipher_algs);
24
25static void qce_ablkcipher_done(void *data)
26{
27 struct crypto_async_request *async_req = data;
28 struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
29 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
30 struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
31 struct qce_device *qce = tmpl->qce;
32 enum dma_data_direction dir_src, dir_dst;
33 u32 status;
34 int error;
35 bool diff_dst;
36
37 diff_dst = (req->src != req->dst) ? true : false;
38 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
39 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
40
41 error = qce_dma_terminate_all(&qce->dma);
42 if (error)
43 dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
44 error);
45
46 if (diff_dst)
47 qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
48 rctx->dst_chained);
49 qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
50 rctx->dst_chained);
51
52 sg_free_table(&rctx->dst_tbl);
53
54 error = qce_check_status(qce, &status);
55 if (error < 0)
56 dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
57
58 qce->async_req_done(tmpl->qce, error);
59}
60
61static int
62qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
63{
64 struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
65 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
66 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
67 struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
68 struct qce_device *qce = tmpl->qce;
69 enum dma_data_direction dir_src, dir_dst;
70 struct scatterlist *sg;
71 bool diff_dst;
72 gfp_t gfp;
73 int ret;
74
75 rctx->iv = req->info;
76 rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
77 rctx->cryptlen = req->nbytes;
78
79 diff_dst = (req->src != req->dst) ? true : false;
80 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
81 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
82
83 rctx->src_nents = qce_countsg(req->src, req->nbytes,
84 &rctx->src_chained);
85 if (diff_dst) {
86 rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
87 &rctx->dst_chained);
88 } else {
89 rctx->dst_nents = rctx->src_nents;
90 rctx->dst_chained = rctx->src_chained;
91 }
92
93 rctx->dst_nents += 1;
94
95 gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
96 GFP_KERNEL : GFP_ATOMIC;
97
98 ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
99 if (ret)
100 return ret;
101
102 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
103
104 sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
105 if (IS_ERR(sg)) {
106 ret = PTR_ERR(sg);
107 goto error_free;
108 }
109
110 sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
111 if (IS_ERR(sg)) {
112 ret = PTR_ERR(sg);
113 goto error_free;
114 }
115
116 sg_mark_end(sg);
117 rctx->dst_sg = rctx->dst_tbl.sgl;
118
119 ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
120 rctx->dst_chained);
121 if (ret < 0)
122 goto error_free;
123
124 if (diff_dst) {
125 ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
126 rctx->src_chained);
127 if (ret < 0)
128 goto error_unmap_dst;
129 rctx->src_sg = req->src;
130 } else {
131 rctx->src_sg = rctx->dst_sg;
132 }
133
134 ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
135 rctx->dst_sg, rctx->dst_nents,
136 qce_ablkcipher_done, async_req);
137 if (ret)
138 goto error_unmap_src;
139
140 qce_dma_issue_pending(&qce->dma);
141
142 ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
143 if (ret)
144 goto error_terminate;
145
146 return 0;
147
148error_terminate:
149 qce_dma_terminate_all(&qce->dma);
150error_unmap_src:
151 if (diff_dst)
152 qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
153 rctx->src_chained);
154error_unmap_dst:
155 qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
156 rctx->dst_chained);
157error_free:
158 sg_free_table(&rctx->dst_tbl);
159 return ret;
160}
161
162static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
163 unsigned int keylen)
164{
165 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
166 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
167 unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
168 int ret;
169
170 if (!key || !keylen)
171 return -EINVAL;
172
173 if (IS_AES(flags)) {
174 switch (keylen) {
175 case AES_KEYSIZE_128:
176 case AES_KEYSIZE_256:
177 break;
178 default:
179 goto fallback;
180 }
181 } else if (IS_DES(flags)) {
182 u32 tmp[DES_EXPKEY_WORDS];
183
184 ret = des_ekey(tmp, key);
185 if (!ret && crypto_ablkcipher_get_flags(ablk) &
186 CRYPTO_TFM_REQ_WEAK_KEY)
187 goto weakkey;
188 }
189
190 ctx->enc_keylen = keylen;
191 memcpy(ctx->enc_key, key, keylen);
192 return 0;
193fallback:
194 ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
195 if (!ret)
196 ctx->enc_keylen = keylen;
197 return ret;
198weakkey:
199 crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
200 return -EINVAL;
201}
202
203static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
204{
205 struct crypto_tfm *tfm =
206 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
207 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
208 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
209 struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
210 int ret;
211
212 rctx->flags = tmpl->alg_flags;
213 rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
214
215 if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
216 ctx->enc_keylen != AES_KEYSIZE_256) {
217 ablkcipher_request_set_tfm(req, ctx->fallback);
218 ret = encrypt ? crypto_ablkcipher_encrypt(req) :
219 crypto_ablkcipher_decrypt(req);
220 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
221 return ret;
222 }
223
224 return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
225}
226
227static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
228{
229 return qce_ablkcipher_crypt(req, 1);
230}
231
232static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
233{
234 return qce_ablkcipher_crypt(req, 0);
235}
236
237static int qce_ablkcipher_init(struct crypto_tfm *tfm)
238{
239 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
240
241 memset(ctx, 0, sizeof(*ctx));
242 tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
243
244 ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
245 CRYPTO_ALG_TYPE_ABLKCIPHER,
246 CRYPTO_ALG_ASYNC |
247 CRYPTO_ALG_NEED_FALLBACK);
248 if (IS_ERR(ctx->fallback))
249 return PTR_ERR(ctx->fallback);
250
251 return 0;
252}
253
254static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
255{
256 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
257
258 crypto_free_ablkcipher(ctx->fallback);
259}
260
261struct qce_ablkcipher_def {
262 unsigned long flags;
263 const char *name;
264 const char *drv_name;
265 unsigned int blocksize;
266 unsigned int ivsize;
267 unsigned int min_keysize;
268 unsigned int max_keysize;
269};
270
271static const struct qce_ablkcipher_def ablkcipher_def[] = {
272 {
273 .flags = QCE_ALG_AES | QCE_MODE_ECB,
274 .name = "ecb(aes)",
275 .drv_name = "ecb-aes-qce",
276 .blocksize = AES_BLOCK_SIZE,
277 .ivsize = AES_BLOCK_SIZE,
278 .min_keysize = AES_MIN_KEY_SIZE,
279 .max_keysize = AES_MAX_KEY_SIZE,
280 },
281 {
282 .flags = QCE_ALG_AES | QCE_MODE_CBC,
283 .name = "cbc(aes)",
284 .drv_name = "cbc-aes-qce",
285 .blocksize = AES_BLOCK_SIZE,
286 .ivsize = AES_BLOCK_SIZE,
287 .min_keysize = AES_MIN_KEY_SIZE,
288 .max_keysize = AES_MAX_KEY_SIZE,
289 },
290 {
291 .flags = QCE_ALG_AES | QCE_MODE_CTR,
292 .name = "ctr(aes)",
293 .drv_name = "ctr-aes-qce",
294 .blocksize = AES_BLOCK_SIZE,
295 .ivsize = AES_BLOCK_SIZE,
296 .min_keysize = AES_MIN_KEY_SIZE,
297 .max_keysize = AES_MAX_KEY_SIZE,
298 },
299 {
300 .flags = QCE_ALG_AES | QCE_MODE_XTS,
301 .name = "xts(aes)",
302 .drv_name = "xts-aes-qce",
303 .blocksize = AES_BLOCK_SIZE,
304 .ivsize = AES_BLOCK_SIZE,
305 .min_keysize = AES_MIN_KEY_SIZE,
306 .max_keysize = AES_MAX_KEY_SIZE,
307 },
308 {
309 .flags = QCE_ALG_DES | QCE_MODE_ECB,
310 .name = "ecb(des)",
311 .drv_name = "ecb-des-qce",
312 .blocksize = DES_BLOCK_SIZE,
313 .ivsize = 0,
314 .min_keysize = DES_KEY_SIZE,
315 .max_keysize = DES_KEY_SIZE,
316 },
317 {
318 .flags = QCE_ALG_DES | QCE_MODE_CBC,
319 .name = "cbc(des)",
320 .drv_name = "cbc-des-qce",
321 .blocksize = DES_BLOCK_SIZE,
322 .ivsize = DES_BLOCK_SIZE,
323 .min_keysize = DES_KEY_SIZE,
324 .max_keysize = DES_KEY_SIZE,
325 },
326 {
327 .flags = QCE_ALG_3DES | QCE_MODE_ECB,
328 .name = "ecb(des3_ede)",
329 .drv_name = "ecb-3des-qce",
330 .blocksize = DES3_EDE_BLOCK_SIZE,
331 .ivsize = 0,
332 .min_keysize = DES3_EDE_KEY_SIZE,
333 .max_keysize = DES3_EDE_KEY_SIZE,
334 },
335 {
336 .flags = QCE_ALG_3DES | QCE_MODE_CBC,
337 .name = "cbc(des3_ede)",
338 .drv_name = "cbc-3des-qce",
339 .blocksize = DES3_EDE_BLOCK_SIZE,
340 .ivsize = DES3_EDE_BLOCK_SIZE,
341 .min_keysize = DES3_EDE_KEY_SIZE,
342 .max_keysize = DES3_EDE_KEY_SIZE,
343 },
344};
345
346static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
347 struct qce_device *qce)
348{
349 struct qce_alg_template *tmpl;
350 struct crypto_alg *alg;
351 int ret;
352
353 tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
354 if (!tmpl)
355 return -ENOMEM;
356
357 alg = &tmpl->alg.crypto;
358
359 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
360 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
361 def->drv_name);
362
363 alg->cra_blocksize = def->blocksize;
364 alg->cra_ablkcipher.ivsize = def->ivsize;
365 alg->cra_ablkcipher.min_keysize = def->min_keysize;
366 alg->cra_ablkcipher.max_keysize = def->max_keysize;
367 alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
368 alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
369 alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
370
371 alg->cra_priority = 300;
372 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
373 CRYPTO_ALG_NEED_FALLBACK;
374 alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
375 alg->cra_alignmask = 0;
376 alg->cra_type = &crypto_ablkcipher_type;
377 alg->cra_module = THIS_MODULE;
378 alg->cra_init = qce_ablkcipher_init;
379 alg->cra_exit = qce_ablkcipher_exit;
380 INIT_LIST_HEAD(&alg->cra_list);
381
382 INIT_LIST_HEAD(&tmpl->entry);
383 tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
384 tmpl->alg_flags = def->flags;
385 tmpl->qce = qce;
386
387 ret = crypto_register_alg(alg);
388 if (ret) {
389 kfree(tmpl);
390 dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
391 return ret;
392 }
393
394 list_add_tail(&tmpl->entry, &ablkcipher_algs);
395 dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
396 return 0;
397}
398
399static void qce_ablkcipher_unregister(struct qce_device *qce)
400{
401 struct qce_alg_template *tmpl, *n;
402
403 list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
404 crypto_unregister_alg(&tmpl->alg.crypto);
405 list_del(&tmpl->entry);
406 kfree(tmpl);
407 }
408}
409
410static int qce_ablkcipher_register(struct qce_device *qce)
411{
412 int ret, i;
413
414 for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
415 ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
416 if (ret)
417 goto err;
418 }
419
420 return 0;
421err:
422 qce_ablkcipher_unregister(qce);
423 return ret;
424}
425
426const struct qce_algo_ops ablkcipher_ops = {
427 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
428 .register_algs = qce_ablkcipher_register,
429 .unregister_algs = qce_ablkcipher_unregister,
430 .async_req_handle = qce_ablkcipher_async_req_handle,
431};
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
new file mode 100644
index 000000000000..d5757cfcda2d
--- /dev/null
+++ b/drivers/crypto/qce/cipher.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _CIPHER_H_
15#define _CIPHER_H_
16
17#include "common.h"
18#include "core.h"
19
20#define QCE_MAX_KEY_SIZE 64
21
22struct qce_cipher_ctx {
23 u8 enc_key[QCE_MAX_KEY_SIZE];
24 unsigned int enc_keylen;
25 struct crypto_ablkcipher *fallback;
26};
27
28/**
29 * struct qce_cipher_reqctx - holds private cipher objects per request
30 * @flags: operation flags
31 * @iv: pointer to the IV
32 * @ivsize: IV size
33 * @src_nents: source entries
34 * @dst_nents: destination entries
35 * @src_chained: is source chained
36 * @dst_chained: is destination chained
37 * @result_sg: scatterlist used for result buffer
38 * @dst_tbl: destination sg table
39 * @dst_sg: destination sg pointer table beginning
40 * @src_tbl: source sg table
41 * @src_sg: source sg pointer table beginning;
42 * @cryptlen: crypto length
43 */
44struct qce_cipher_reqctx {
45 unsigned long flags;
46 u8 *iv;
47 unsigned int ivsize;
48 int src_nents;
49 int dst_nents;
50 bool src_chained;
51 bool dst_chained;
52 struct scatterlist result_sg;
53 struct sg_table dst_tbl;
54 struct scatterlist *dst_sg;
55 struct sg_table src_tbl;
56 struct scatterlist *src_sg;
57 unsigned int cryptlen;
58};
59
60static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
61{
62 struct crypto_alg *alg = tfm->__crt_alg;
63 return container_of(alg, struct qce_alg_template, alg.crypto);
64}
65
66extern const struct qce_algo_ops ablkcipher_ops;
67
68#endif /* _CIPHER_H_ */
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
new file mode 100644
index 000000000000..1cd4d5ea8114
--- /dev/null
+++ b/drivers/crypto/qce/common.c
@@ -0,0 +1,435 @@
1/*
2 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/err.h>
15#include <linux/interrupt.h>
16#include <linux/types.h>
17#include <crypto/scatterwalk.h>
18#include <crypto/sha.h>
19
20#include "cipher.h"
21#include "common.h"
22#include "core.h"
23#include "regs-v5.h"
24#include "sha.h"
25
26#define QCE_SECTOR_SIZE 512
27
28static inline u32 qce_read(struct qce_device *qce, u32 offset)
29{
30 return readl(qce->base + offset);
31}
32
33static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
34{
35 writel(val, qce->base + offset);
36}
37
38static inline void qce_write_array(struct qce_device *qce, u32 offset,
39 const u32 *val, unsigned int len)
40{
41 int i;
42
43 for (i = 0; i < len; i++)
44 qce_write(qce, offset + i * sizeof(u32), val[i]);
45}
46
47static inline void
48qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
49{
50 int i;
51
52 for (i = 0; i < len; i++)
53 qce_write(qce, offset + i * sizeof(u32), 0);
54}
55
56static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
57{
58 u32 cfg = 0;
59
60 if (IS_AES(flags)) {
61 if (aes_key_size == AES_KEYSIZE_128)
62 cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
63 else if (aes_key_size == AES_KEYSIZE_256)
64 cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
65 }
66
67 if (IS_AES(flags))
68 cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
69 else if (IS_DES(flags) || IS_3DES(flags))
70 cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
71
72 if (IS_DES(flags))
73 cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
74
75 if (IS_3DES(flags))
76 cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
77
78 switch (flags & QCE_MODE_MASK) {
79 case QCE_MODE_ECB:
80 cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
81 break;
82 case QCE_MODE_CBC:
83 cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
84 break;
85 case QCE_MODE_CTR:
86 cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
87 break;
88 case QCE_MODE_XTS:
89 cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
90 break;
91 case QCE_MODE_CCM:
92 cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
93 cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
94 break;
95 default:
96 return ~0;
97 }
98
99 return cfg;
100}
101
102static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
103{
104 u32 cfg = 0;
105
106 if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
107 cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
108 else
109 cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
110
111 if (IS_CCM(flags) || IS_CMAC(flags)) {
112 if (key_size == AES_KEYSIZE_128)
113 cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
114 else if (key_size == AES_KEYSIZE_256)
115 cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
116 }
117
118 if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
119 cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
120 else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
121 cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
122 else if (IS_CMAC(flags))
123 cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
124
125 if (IS_SHA1(flags) || IS_SHA256(flags))
126 cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
127 else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
128 IS_CBC(flags) || IS_CTR(flags))
129 cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
130 else if (IS_AES(flags) && IS_CCM(flags))
131 cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
132 else if (IS_AES(flags) && IS_CMAC(flags))
133 cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
134
135 if (IS_SHA(flags) || IS_SHA_HMAC(flags))
136 cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
137
138 if (IS_CCM(flags))
139 cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
140
141 if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
142 IS_CMAC(flags))
143 cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
144
145 return cfg;
146}
147
148static u32 qce_config_reg(struct qce_device *qce, int little)
149{
150 u32 beats = (qce->burst_size >> 3) - 1;
151 u32 pipe_pair = qce->pipe_pair_id;
152 u32 config;
153
154 config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
155 config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
156 BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
157 config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
158 config &= ~HIGH_SPD_EN_N_SHIFT;
159
160 if (little)
161 config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
162
163 return config;
164}
165
166void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
167{
168 __be32 *d = dst;
169 const u8 *s = src;
170 unsigned int n;
171
172 n = len / sizeof(u32);
173 for (; n > 0; n--) {
174 *d = cpu_to_be32p((const __u32 *) s);
175 s += sizeof(__u32);
176 d++;
177 }
178}
179
180static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
181{
182 u8 swap[QCE_AES_IV_LENGTH];
183 u32 i, j;
184
185 if (ivsize > QCE_AES_IV_LENGTH)
186 return;
187
188 memset(swap, 0, QCE_AES_IV_LENGTH);
189
190 for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
191 i < QCE_AES_IV_LENGTH; i++, j--)
192 swap[i] = src[j];
193
194 qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
195}
196
197static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
198 unsigned int enckeylen, unsigned int cryptlen)
199{
200 u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
201 unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
202 unsigned int xtsdusize;
203
204 qce_cpu_to_be32p_array(xtskey, enckey + enckeylen / 2, enckeylen / 2);
205 qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
206
207 /* xts du size 512B */
208 xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
209 qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
210}
211
212static void qce_setup_config(struct qce_device *qce)
213{
214 u32 config;
215
216 /* get big endianness */
217 config = qce_config_reg(qce, 0);
218
219 /* clear status */
220 qce_write(qce, REG_STATUS, 0);
221 qce_write(qce, REG_CONFIG, config);
222}
223
224static inline void qce_crypto_go(struct qce_device *qce)
225{
226 qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
227}
228
229static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
230 u32 totallen, u32 offset)
231{
232 struct ahash_request *req = ahash_request_cast(async_req);
233 struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
234 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
235 struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
236 struct qce_device *qce = tmpl->qce;
237 unsigned int digestsize = crypto_ahash_digestsize(ahash);
238 unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
239 __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
240 __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
241 u32 auth_cfg = 0, config;
242 unsigned int iv_words;
243
244 /* if not the last, the size has to be on the block boundary */
245 if (!rctx->last_blk && req->nbytes % blocksize)
246 return -EINVAL;
247
248 qce_setup_config(qce);
249
250 if (IS_CMAC(rctx->flags)) {
251 qce_write(qce, REG_AUTH_SEG_CFG, 0);
252 qce_write(qce, REG_ENCR_SEG_CFG, 0);
253 qce_write(qce, REG_ENCR_SEG_SIZE, 0);
254 qce_clear_array(qce, REG_AUTH_IV0, 16);
255 qce_clear_array(qce, REG_AUTH_KEY0, 16);
256 qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
257
258 auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
259 }
260
261 if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
262 u32 authkey_words = rctx->authklen / sizeof(u32);
263
264 qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
265 qce_write_array(qce, REG_AUTH_KEY0, mackey, authkey_words);
266 }
267
268 if (IS_CMAC(rctx->flags))
269 goto go_proc;
270
271 if (rctx->first_blk)
272 memcpy(auth, rctx->digest, digestsize);
273 else
274 qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);
275
276 iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
277 qce_write_array(qce, REG_AUTH_IV0, auth, iv_words);
278
279 if (rctx->first_blk)
280 qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
281 else
282 qce_write_array(qce, REG_AUTH_BYTECNT0, rctx->byte_count, 2);
283
284 auth_cfg = qce_auth_cfg(rctx->flags, 0);
285
286 if (rctx->last_blk)
287 auth_cfg |= BIT(AUTH_LAST_SHIFT);
288 else
289 auth_cfg &= ~BIT(AUTH_LAST_SHIFT);
290
291 if (rctx->first_blk)
292 auth_cfg |= BIT(AUTH_FIRST_SHIFT);
293 else
294 auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);
295
296go_proc:
297 qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
298 qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
299 qce_write(qce, REG_AUTH_SEG_START, 0);
300 qce_write(qce, REG_ENCR_SEG_CFG, 0);
301 qce_write(qce, REG_SEG_SIZE, req->nbytes);
302
303 /* get little endianness */
304 config = qce_config_reg(qce, 1);
305 qce_write(qce, REG_CONFIG, config);
306
307 qce_crypto_go(qce);
308
309 return 0;
310}
311
312static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
313 u32 totallen, u32 offset)
314{
315 struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
316 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
317 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
318 struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
319 struct qce_device *qce = tmpl->qce;
320 __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
321 __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
322 unsigned int enckey_words, enciv_words;
323 unsigned int keylen;
324 u32 encr_cfg = 0, auth_cfg = 0, config;
325 unsigned int ivsize = rctx->ivsize;
326 unsigned long flags = rctx->flags;
327
328 qce_setup_config(qce);
329
330 if (IS_XTS(flags))
331 keylen = ctx->enc_keylen / 2;
332 else
333 keylen = ctx->enc_keylen;
334
335 qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
336 enckey_words = keylen / sizeof(u32);
337
338 qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
339
340 encr_cfg = qce_encr_cfg(flags, keylen);
341
342 if (IS_DES(flags)) {
343 enciv_words = 2;
344 enckey_words = 2;
345 } else if (IS_3DES(flags)) {
346 enciv_words = 2;
347 enckey_words = 6;
348 } else if (IS_AES(flags)) {
349 if (IS_XTS(flags))
350 qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
351 rctx->cryptlen);
352 enciv_words = 4;
353 } else {
354 return -EINVAL;
355 }
356
357 qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words);
358
359 if (!IS_ECB(flags)) {
360 if (IS_XTS(flags))
361 qce_xts_swapiv(enciv, rctx->iv, ivsize);
362 else
363 qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);
364
365 qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words);
366 }
367
368 if (IS_ENCRYPT(flags))
369 encr_cfg |= BIT(ENCODE_SHIFT);
370
371 qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
372 qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
373 qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
374
375 if (IS_CTR(flags)) {
376 qce_write(qce, REG_CNTR_MASK, ~0);
377 qce_write(qce, REG_CNTR_MASK0, ~0);
378 qce_write(qce, REG_CNTR_MASK1, ~0);
379 qce_write(qce, REG_CNTR_MASK2, ~0);
380 }
381
382 qce_write(qce, REG_SEG_SIZE, totallen);
383
384 /* get little endianness */
385 config = qce_config_reg(qce, 1);
386 qce_write(qce, REG_CONFIG, config);
387
388 qce_crypto_go(qce);
389
390 return 0;
391}
392
393int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
394 u32 offset)
395{
396 switch (type) {
397 case CRYPTO_ALG_TYPE_ABLKCIPHER:
398 return qce_setup_regs_ablkcipher(async_req, totallen, offset);
399 case CRYPTO_ALG_TYPE_AHASH:
400 return qce_setup_regs_ahash(async_req, totallen, offset);
401 default:
402 return -EINVAL;
403 }
404}
405
406#define STATUS_ERRORS \
407 (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
408
409int qce_check_status(struct qce_device *qce, u32 *status)
410{
411 int ret = 0;
412
413 *status = qce_read(qce, REG_STATUS);
414
415 /*
416 * Don't use the result dump status. The operation may not be complete.
417 * Instead, use the status we just read from the device. In case we need
418 * to use result_status from the result dump, it needs to be byte swapped,
419 * since we set the device to little endian.
420 */
421 if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
422 ret = -ENXIO;
423
424 return ret;
425}
426
427void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
428{
429 u32 val;
430
431 val = qce_read(qce, REG_VERSION);
432 *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
433 *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
434 *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
435}
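What the commit log calls "translate data stream to big endian form" is
qce_cpu_to_be32p_array() above: keys, IVs and intermediate digests are repacked into
big-endian 32-bit words before being written into the CE key/IV registers, so each
register value reads the byte stream most-significant-byte first. A standalone
user-space sketch of the same packing, added here for illustration only (htonl()
standing in for cpu_to_be32()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() plays the role of cpu_to_be32() */

/* mirrors qce_cpu_to_be32p_array(): pack a byte stream into be32 words */
static void to_be32_array(uint32_t *dst, const uint8_t *src, unsigned int len)
{
	unsigned int n = len / sizeof(uint32_t);

	while (n--) {
		uint32_t word;

		memcpy(&word, src, sizeof(word));	/* like *(const __u32 *)s */
		*dst++ = htonl(word);			/* cpu_to_be32() */
		src += sizeof(word);
	}
}

int main(void)
{
	const uint8_t stream[8] = { 0x01, 0x02, 0x03, 0x04, 0xaa, 0xbb, 0xcc, 0xdd };
	uint32_t words[2];

	to_be32_array(words, stream, sizeof(stream));
	/* prints "01020304 aabbccdd" on little- and big-endian hosts alike:
	 * the 32-bit value handed to the register write is the byte stream
	 * read most-significant-byte first. */
	printf("%08x %08x\n", (unsigned int)words[0], (unsigned int)words[1]);
	return 0;
}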
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
new file mode 100644
index 000000000000..411b1fc97216
--- /dev/null
+++ b/drivers/crypto/qce/common.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _COMMON_H_
15#define _COMMON_H_
16
17#include <linux/crypto.h>
18#include <linux/types.h>
19#include <crypto/aes.h>
20#include <crypto/hash.h>
21
22/* key size in bytes */
23#define QCE_SHA_HMAC_KEY_SIZE 64
24#define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256
25
26/* IV length in bytes */
27#define QCE_AES_IV_LENGTH AES_BLOCK_SIZE
28/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
29#define QCE_MAX_IV_SIZE AES_BLOCK_SIZE
30
31/* maximum nonce bytes */
32#define QCE_MAX_NONCE 16
33#define QCE_MAX_NONCE_WORDS (QCE_MAX_NONCE / sizeof(u32))
34
35/* burst size alignment requirement */
36#define QCE_MAX_ALIGN_SIZE 64
37
38/* cipher algorithms */
39#define QCE_ALG_DES BIT(0)
40#define QCE_ALG_3DES BIT(1)
41#define QCE_ALG_AES BIT(2)
42
43/* hash and hmac algorithms */
44#define QCE_HASH_SHA1 BIT(3)
45#define QCE_HASH_SHA256 BIT(4)
46#define QCE_HASH_SHA1_HMAC BIT(5)
47#define QCE_HASH_SHA256_HMAC BIT(6)
48#define QCE_HASH_AES_CMAC BIT(7)
49
50/* cipher modes */
51#define QCE_MODE_CBC BIT(8)
52#define QCE_MODE_ECB BIT(9)
53#define QCE_MODE_CTR BIT(10)
54#define QCE_MODE_XTS BIT(11)
55#define QCE_MODE_CCM BIT(12)
56#define QCE_MODE_MASK GENMASK(12, 8)
57
58/* cipher encryption/decryption operations */
59#define QCE_ENCRYPT BIT(13)
60#define QCE_DECRYPT BIT(14)
61
62#define IS_DES(flags) (flags & QCE_ALG_DES)
63#define IS_3DES(flags) (flags & QCE_ALG_3DES)
64#define IS_AES(flags) (flags & QCE_ALG_AES)
65
66#define IS_SHA1(flags) (flags & QCE_HASH_SHA1)
67#define IS_SHA256(flags) (flags & QCE_HASH_SHA256)
68#define IS_SHA1_HMAC(flags) (flags & QCE_HASH_SHA1_HMAC)
69#define IS_SHA256_HMAC(flags) (flags & QCE_HASH_SHA256_HMAC)
70#define IS_CMAC(flags) (flags & QCE_HASH_AES_CMAC)
71#define IS_SHA(flags) (IS_SHA1(flags) || IS_SHA256(flags))
72#define IS_SHA_HMAC(flags) \
73 (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
74
75#define IS_CBC(mode) (mode & QCE_MODE_CBC)
76#define IS_ECB(mode) (mode & QCE_MODE_ECB)
77#define IS_CTR(mode) (mode & QCE_MODE_CTR)
78#define IS_XTS(mode) (mode & QCE_MODE_XTS)
79#define IS_CCM(mode) (mode & QCE_MODE_CCM)
80
81#define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT)
82#define IS_DECRYPT(dir) (dir & QCE_DECRYPT)
83
84struct qce_alg_template {
85 struct list_head entry;
86 u32 crypto_alg_type;
87 unsigned long alg_flags;
88 const __be32 *std_iv;
89 union {
90 struct crypto_alg crypto;
91 struct ahash_alg ahash;
92 } alg;
93 struct qce_device *qce;
94};
95
96void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
97int qce_check_status(struct qce_device *qce, u32 *status);
98void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
99int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
100 u32 offset);
101
102#endif /* _COMMON_H_ */
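The algorithm, mode and direction bits defined above all travel in one unsigned long
(the template's alg_flags plus the direction bit added in qce_ablkcipher_crypt()),
and the rest of the driver only ever tests them through the IS_*() helpers. A small
standalone sketch, with the relevant values copied from this header (illustrative
only, not part of the patch):

#include <stdio.h>

#define BIT(nr)		(1UL << (nr))

/* values copied from common.h above */
#define QCE_ALG_AES	BIT(2)
#define QCE_MODE_CBC	BIT(8)
#define QCE_MODE_MASK	(0x1fUL << 8)	/* GENMASK(12, 8) */
#define QCE_ENCRYPT	BIT(13)

#define IS_AES(flags)		((flags) & QCE_ALG_AES)
#define IS_CBC(mode)		((mode) & QCE_MODE_CBC)
#define IS_ENCRYPT(dir)		((dir) & QCE_ENCRYPT)

int main(void)
{
	/* what qce_ablkcipher_crypt() ends up with for a "cbc(aes)" encryption:
	 * tmpl->alg_flags (QCE_ALG_AES | QCE_MODE_CBC) ORed with QCE_ENCRYPT */
	unsigned long flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_ENCRYPT;

	printf("flags     = 0x%lx\n", flags);			/* 0x2104 */
	printf("mode bits = 0x%lx\n", flags & QCE_MODE_MASK);	/* 0x100  */
	printf("aes=%d cbc=%d encrypt=%d\n",
	       !!IS_AES(flags), !!IS_CBC(flags), !!IS_ENCRYPT(flags));
	return 0;
}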
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
new file mode 100644
index 000000000000..33ae3545dc48
--- /dev/null
+++ b/drivers/crypto/qce/core.c
@@ -0,0 +1,286 @@
1/*
2 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/interrupt.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/spinlock.h>
19#include <linux/types.h>
20#include <crypto/algapi.h>
21#include <crypto/internal/hash.h>
22#include <crypto/sha.h>
23
24#include "core.h"
25#include "cipher.h"
26#include "sha.h"
27
28#define QCE_MAJOR_VERSION5 0x05
29#define QCE_QUEUE_LENGTH 1
30
31static const struct qce_algo_ops *qce_ops[] = {
32 &ablkcipher_ops,
33 &ahash_ops,
34};
35
36static void qce_unregister_algs(struct qce_device *qce)
37{
38 const struct qce_algo_ops *ops;
39 int i;
40
41 for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
42 ops = qce_ops[i];
43 ops->unregister_algs(qce);
44 }
45}
46
47static int qce_register_algs(struct qce_device *qce)
48{
49 const struct qce_algo_ops *ops;
50 int i, ret = -ENODEV;
51
52 for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
53 ops = qce_ops[i];
54 ret = ops->register_algs(qce);
55 if (ret)
56 break;
57 }
58
59 return ret;
60}
61
62static int qce_handle_request(struct crypto_async_request *async_req)
63{
64 int ret = -EINVAL, i;
65 const struct qce_algo_ops *ops;
66 u32 type = crypto_tfm_alg_type(async_req->tfm);
67
68 for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
69 ops = qce_ops[i];
70 if (type != ops->type)
71 continue;
72 ret = ops->async_req_handle(async_req);
73 break;
74 }
75
76 return ret;
77}
78
79static int qce_handle_queue(struct qce_device *qce,
80 struct crypto_async_request *req)
81{
82 struct crypto_async_request *async_req, *backlog;
83 unsigned long flags;
84 int ret = 0, err;
85
86 spin_lock_irqsave(&qce->lock, flags);
87
88 if (req)
89 ret = crypto_enqueue_request(&qce->queue, req);
90
91 /* busy, do not dequeue request */
92 if (qce->req) {
93 spin_unlock_irqrestore(&qce->lock, flags);
94 return ret;
95 }
96
97 backlog = crypto_get_backlog(&qce->queue);
98 async_req = crypto_dequeue_request(&qce->queue);
99 if (async_req)
100 qce->req = async_req;
101
102 spin_unlock_irqrestore(&qce->lock, flags);
103
104 if (!async_req)
105 return ret;
106
107 if (backlog) {
108 spin_lock_bh(&qce->lock);
109 backlog->complete(backlog, -EINPROGRESS);
110 spin_unlock_bh(&qce->lock);
111 }
112
113 err = qce_handle_request(async_req);
114 if (err) {
115 qce->result = err;
116 tasklet_schedule(&qce->done_tasklet);
117 }
118
119 return ret;
120}
121
122static void qce_tasklet_req_done(unsigned long data)
123{
124 struct qce_device *qce = (struct qce_device *)data;
125 struct crypto_async_request *req;
126 unsigned long flags;
127
128 spin_lock_irqsave(&qce->lock, flags);
129 req = qce->req;
130 qce->req = NULL;
131 spin_unlock_irqrestore(&qce->lock, flags);
132
133 if (req)
134 req->complete(req, qce->result);
135
136 qce_handle_queue(qce, NULL);
137}
138
139static int qce_async_request_enqueue(struct qce_device *qce,
140 struct crypto_async_request *req)
141{
142 return qce_handle_queue(qce, req);
143}
144
145static void qce_async_request_done(struct qce_device *qce, int ret)
146{
147 qce->result = ret;
148 tasklet_schedule(&qce->done_tasklet);
149}
150
151static int qce_check_version(struct qce_device *qce)
152{
153 u32 major, minor, step;
154
155 qce_get_version(qce, &major, &minor, &step);
156
157 /*
158 * the driver does not support v5 with minor 0 because it has special
159 * alignment requirements.
160 */
161 if (major != QCE_MAJOR_VERSION5 || minor == 0)
162 return -ENODEV;
163
164 qce->burst_size = QCE_BAM_BURST_SIZE;
165 qce->pipe_pair_id = 1;
166
167 dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
168 major, minor, step);
169
170 return 0;
171}
172
173static int qce_crypto_probe(struct platform_device *pdev)
174{
175 struct device *dev = &pdev->dev;
176 struct qce_device *qce;
177 struct resource *res;
178 int ret;
179
180 qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
181 if (!qce)
182 return -ENOMEM;
183
184 qce->dev = dev;
185 platform_set_drvdata(pdev, qce);
186
187 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
188 qce->base = devm_ioremap_resource(&pdev->dev, res);
189 if (IS_ERR(qce->base))
190 return PTR_ERR(qce->base);
191
192 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
193 if (ret < 0)
194 return ret;
195
196 qce->core = devm_clk_get(qce->dev, "core");
197 if (IS_ERR(qce->core))
198 return PTR_ERR(qce->core);
199
200 qce->iface = devm_clk_get(qce->dev, "iface");
201 if (IS_ERR(qce->iface))
202 return PTR_ERR(qce->iface);
203
204 qce->bus = devm_clk_get(qce->dev, "bus");
205 if (IS_ERR(qce->bus))
206 return PTR_ERR(qce->bus);
207
208 ret = clk_prepare_enable(qce->core);
209 if (ret)
210 return ret;
211
212 ret = clk_prepare_enable(qce->iface);
213 if (ret)
214 goto err_clks_core;
215
216 ret = clk_prepare_enable(qce->bus);
217 if (ret)
218 goto err_clks_iface;
219
220 ret = qce_dma_request(qce->dev, &qce->dma);
221 if (ret)
222 goto err_clks;
223
224 ret = qce_check_version(qce);
225 if (ret)
226 goto err_clks;
227
228 spin_lock_init(&qce->lock);
229 tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
230 (unsigned long)qce);
231 crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
232
233 qce->async_req_enqueue = qce_async_request_enqueue;
234 qce->async_req_done = qce_async_request_done;
235
236 ret = qce_register_algs(qce);
237 if (ret)
238 goto err_dma;
239
240 return 0;
241
242err_dma:
243 qce_dma_release(&qce->dma);
244err_clks:
245 clk_disable_unprepare(qce->bus);
246err_clks_iface:
247 clk_disable_unprepare(qce->iface);
248err_clks_core:
249 clk_disable_unprepare(qce->core);
250 return ret;
251}
252
253static int qce_crypto_remove(struct platform_device *pdev)
254{
255 struct qce_device *qce = platform_get_drvdata(pdev);
256
257 tasklet_kill(&qce->done_tasklet);
258 qce_unregister_algs(qce);
259 qce_dma_release(&qce->dma);
260 clk_disable_unprepare(qce->bus);
261 clk_disable_unprepare(qce->iface);
262 clk_disable_unprepare(qce->core);
263 return 0;
264}
265
266static const struct of_device_id qce_crypto_of_match[] = {
267 { .compatible = "qcom,crypto-v5.1", },
268 {}
269};
270MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
271
272static struct platform_driver qce_crypto_driver = {
273 .probe = qce_crypto_probe,
274 .remove = qce_crypto_remove,
275 .driver = {
276 .owner = THIS_MODULE,
277 .name = KBUILD_MODNAME,
278 .of_match_table = qce_crypto_of_match,
279 },
280};
281module_platform_driver(qce_crypto_driver);
282
283MODULE_LICENSE("GPL v2");
284MODULE_DESCRIPTION("Qualcomm crypto engine driver");
285MODULE_ALIAS("platform:" KBUILD_MODNAME);
286MODULE_AUTHOR("The Linux Foundation");
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
new file mode 100644
index 000000000000..549965d4d91f
--- /dev/null
+++ b/drivers/crypto/qce/core.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _CORE_H_
15#define _CORE_H_
16
17#include "dma.h"
18
19/**
20 * struct qce_device - crypto engine device structure
21 * @queue: crypto request queue
22 * @lock: the lock protects queue and req
23 * @done_tasklet: done tasklet object
24 * @req: current active request
25 * @result: result of current transform
26 * @base: virtual IO base
27 * @dev: pointer to device structure
28 * @core: core device clock
29 * @iface: interface clock
30 * @bus: bus clock
31 * @dma: pointer to dma data
32 * @burst_size: the crypto burst size
33 * @pipe_pair_id: which pipe pair id the device is using
34 * @async_req_enqueue: invoked by every algorithm to enqueue a request
35 * @async_req_done: invoked by every algorithm to finish its request
36 */
37struct qce_device {
38 struct crypto_queue queue;
39 spinlock_t lock;
40 struct tasklet_struct done_tasklet;
41 struct crypto_async_request *req;
42 int result;
43 void __iomem *base;
44 struct device *dev;
45 struct clk *core, *iface, *bus;
46 struct qce_dma_data dma;
47 int burst_size;
48 unsigned int pipe_pair_id;
49 int (*async_req_enqueue)(struct qce_device *qce,
50 struct crypto_async_request *req);
51 void (*async_req_done)(struct qce_device *qce, int ret);
52};
53
54/**
55 * struct qce_algo_ops - algorithm operations per crypto type
56 * @type: should be CRYPTO_ALG_TYPE_XXX
57 * @register_algs: invoked by core to register the algorithms
58 * @unregister_algs: invoked by core to unregister the algorithms
59 * @async_req_handle: invoked by core to handle enqueued request
60 */
61struct qce_algo_ops {
62 u32 type;
63 int (*register_algs)(struct qce_device *qce);
64 void (*unregister_algs)(struct qce_device *qce);
65 int (*async_req_handle)(struct crypto_async_request *async_req);
66};
67
68#endif /* _CORE_H_ */
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
new file mode 100644
index 000000000000..0fb21e13f247
--- /dev/null
+++ b/drivers/crypto/qce/dma.c
@@ -0,0 +1,186 @@
1/*
2 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/dmaengine.h>
15#include <crypto/scatterwalk.h>
16
17#include "dma.h"
18
19int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
20{
21 int ret;
22
23 dma->txchan = dma_request_slave_channel_reason(dev, "tx");
24 if (IS_ERR(dma->txchan))
25 return PTR_ERR(dma->txchan);
26
27 dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
28 if (IS_ERR(dma->rxchan)) {
29 ret = PTR_ERR(dma->rxchan);
30 goto error_rx;
31 }
32
33 dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
34 GFP_KERNEL);
35 if (!dma->result_buf) {
36 ret = -ENOMEM;
37 goto error_nomem;
38 }
39
40 dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
41
42 return 0;
43error_nomem:
44 dma_release_channel(dma->rxchan);
45error_rx:
46 dma_release_channel(dma->txchan);
47 return ret;
48}
49
50void qce_dma_release(struct qce_dma_data *dma)
51{
52 dma_release_channel(dma->txchan);
53 dma_release_channel(dma->rxchan);
54 kfree(dma->result_buf);
55}
56
57int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
58 enum dma_data_direction dir, bool chained)
59{
60 int err;
61
62 if (chained) {
63 while (sg) {
64 err = dma_map_sg(dev, sg, 1, dir);
65 if (!err)
66 return -EFAULT;
67 sg = scatterwalk_sg_next(sg);
68 }
69 } else {
70 err = dma_map_sg(dev, sg, nents, dir);
71 if (!err)
72 return -EFAULT;
73 }
74
75 return nents;
76}
77
78void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
79 enum dma_data_direction dir, bool chained)
80{
81 if (chained)
82 while (sg) {
83 dma_unmap_sg(dev, sg, 1, dir);
84 sg = scatterwalk_sg_next(sg);
85 }
86 else
87 dma_unmap_sg(dev, sg, nents, dir);
88}
89
90int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
91{
92 struct scatterlist *sg = sglist;
93 int nents = 0;
94
95 if (chained)
96 *chained = false;
97
98 while (nbytes > 0 && sg) {
99 nents++;
100 nbytes -= sg->length;
101 if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
102 *chained = true;
103 sg = scatterwalk_sg_next(sg);
104 }
105
106 return nents;
107}
108
109struct scatterlist *
110qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
111{
112 struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
113
114 while (sg) {
115 if (!sg_page(sg))
116 break;
117 sg = sg_next(sg);
118 }
119
120 if (!sg)
121 return ERR_PTR(-EINVAL);
122
123 while (new_sgl && sg) {
124 sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
125 new_sgl->offset);
126 sg_last = sg;
127 sg = sg_next(sg);
128 new_sgl = sg_next(new_sgl);
129 }
130
131 return sg_last;
132}
133
134static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
135 int nents, unsigned long flags,
136 enum dma_transfer_direction dir,
137 dma_async_tx_callback cb, void *cb_param)
138{
139 struct dma_async_tx_descriptor *desc;
140 dma_cookie_t cookie;
141
142 if (!sg || !nents)
143 return -EINVAL;
144
145 desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
146 if (!desc)
147 return -EINVAL;
148
149 desc->callback = cb;
150 desc->callback_param = cb_param;
151 cookie = dmaengine_submit(desc);
152
153 return dma_submit_error(cookie);
154}
155
156int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
157 int rx_nents, struct scatterlist *tx_sg, int tx_nents,
158 dma_async_tx_callback cb, void *cb_param)
159{
160 struct dma_chan *rxchan = dma->rxchan;
161 struct dma_chan *txchan = dma->txchan;
162 unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
163 int ret;
164
165 ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
166 NULL, NULL);
167 if (ret)
168 return ret;
169
170 return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
171 cb, cb_param);
172}
173
174void qce_dma_issue_pending(struct qce_dma_data *dma)
175{
176 dma_async_issue_pending(dma->rxchan);
177 dma_async_issue_pending(dma->txchan);
178}
179
180int qce_dma_terminate_all(struct qce_dma_data *dma)
181{
182 int ret;
183
184 ret = dmaengine_terminate_all(dma->rxchan);
185 return ret ?: dmaengine_terminate_all(dma->txchan);
186}
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
new file mode 100644
index 000000000000..805e378d59e9
--- /dev/null
+++ b/drivers/crypto/qce/dma.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _DMA_H_
15#define _DMA_H_
16
17/* maximum data transfer block size between BAM and CE */
18#define QCE_BAM_BURST_SIZE 64
19
20#define QCE_AUTHIV_REGS_CNT 16
21#define QCE_AUTH_BYTECOUNT_REGS_CNT 4
22#define QCE_CNTRIV_REGS_CNT 4
23
24struct qce_result_dump {
25 u32 auth_iv[QCE_AUTHIV_REGS_CNT];
26 u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
27 u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
28 u32 status;
29 u32 status2;
30};
31
32#define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE)
33#define QCE_RESULT_BUF_SZ \
34 ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)
35
36struct qce_dma_data {
37 struct dma_chan *txchan;
38 struct dma_chan *rxchan;
39 struct qce_result_dump *result_buf;
40 void *ignore_buf;
41};
42
43int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
44void qce_dma_release(struct qce_dma_data *dma);
45int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
46 int in_ents, struct scatterlist *sg_out, int out_ents,
47 dma_async_tx_callback cb, void *cb_param);
48void qce_dma_issue_pending(struct qce_dma_data *dma);
49int qce_dma_terminate_all(struct qce_dma_data *dma);
50int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
51void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
52 enum dma_data_direction dir, bool chained);
53int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
54 enum dma_data_direction dir, bool chained);
55struct scatterlist *
56qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
57
58#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h
new file mode 100644
index 000000000000..f0e19e35664a
--- /dev/null
+++ b/drivers/crypto/qce/regs-v5.h
@@ -0,0 +1,334 @@
1/*
2 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _REGS_V5_H_
15#define _REGS_V5_H_
16
17#include <linux/bitops.h>
18
19#define REG_VERSION 0x000
20#define REG_STATUS 0x100
21#define REG_STATUS2 0x104
22#define REG_ENGINES_AVAIL 0x108
23#define REG_FIFO_SIZES 0x10c
24#define REG_SEG_SIZE 0x110
25#define REG_GOPROC 0x120
26#define REG_ENCR_SEG_CFG 0x200
27#define REG_ENCR_SEG_SIZE 0x204
28#define REG_ENCR_SEG_START 0x208
29#define REG_CNTR0_IV0 0x20c
30#define REG_CNTR1_IV1 0x210
31#define REG_CNTR2_IV2 0x214
32#define REG_CNTR3_IV3 0x218
33#define REG_CNTR_MASK 0x21C
34#define REG_ENCR_CCM_INT_CNTR0 0x220
35#define REG_ENCR_CCM_INT_CNTR1 0x224
36#define REG_ENCR_CCM_INT_CNTR2 0x228
37#define REG_ENCR_CCM_INT_CNTR3 0x22c
38#define REG_ENCR_XTS_DU_SIZE 0x230
39#define REG_CNTR_MASK2 0x234
40#define REG_CNTR_MASK1 0x238
41#define REG_CNTR_MASK0 0x23c
42#define REG_AUTH_SEG_CFG 0x300
43#define REG_AUTH_SEG_SIZE 0x304
44#define REG_AUTH_SEG_START 0x308
45#define REG_AUTH_IV0 0x310
46#define REG_AUTH_IV1 0x314
47#define REG_AUTH_IV2 0x318
48#define REG_AUTH_IV3 0x31c
49#define REG_AUTH_IV4 0x320
50#define REG_AUTH_IV5 0x324
51#define REG_AUTH_IV6 0x328
52#define REG_AUTH_IV7 0x32c
53#define REG_AUTH_IV8 0x330
54#define REG_AUTH_IV9 0x334
55#define REG_AUTH_IV10 0x338
56#define REG_AUTH_IV11 0x33c
57#define REG_AUTH_IV12 0x340
58#define REG_AUTH_IV13 0x344
59#define REG_AUTH_IV14 0x348
60#define REG_AUTH_IV15 0x34c
61#define REG_AUTH_INFO_NONCE0 0x350
62#define REG_AUTH_INFO_NONCE1 0x354
63#define REG_AUTH_INFO_NONCE2 0x358
64#define REG_AUTH_INFO_NONCE3 0x35c
65#define REG_AUTH_BYTECNT0 0x390
66#define REG_AUTH_BYTECNT1 0x394
67#define REG_AUTH_BYTECNT2 0x398
68#define REG_AUTH_BYTECNT3 0x39c
69#define REG_AUTH_EXP_MAC0 0x3a0
70#define REG_AUTH_EXP_MAC1 0x3a4
71#define REG_AUTH_EXP_MAC2 0x3a8
72#define REG_AUTH_EXP_MAC3 0x3ac
73#define REG_AUTH_EXP_MAC4 0x3b0
74#define REG_AUTH_EXP_MAC5 0x3b4
75#define REG_AUTH_EXP_MAC6 0x3b8
76#define REG_AUTH_EXP_MAC7 0x3bc
77#define REG_CONFIG 0x400
78#define REG_GOPROC_QC_KEY 0x1000
79#define REG_GOPROC_OEM_KEY 0x2000
80#define REG_ENCR_KEY0 0x3000
81#define REG_ENCR_KEY1 0x3004
82#define REG_ENCR_KEY2 0x3008
83#define REG_ENCR_KEY3 0x300c
84#define REG_ENCR_KEY4 0x3010
85#define REG_ENCR_KEY5 0x3014
86#define REG_ENCR_KEY6 0x3018
87#define REG_ENCR_KEY7 0x301c
88#define REG_ENCR_XTS_KEY0 0x3020
89#define REG_ENCR_XTS_KEY1 0x3024
90#define REG_ENCR_XTS_KEY2 0x3028
91#define REG_ENCR_XTS_KEY3 0x302c
92#define REG_ENCR_XTS_KEY4 0x3030
93#define REG_ENCR_XTS_KEY5 0x3034
94#define REG_ENCR_XTS_KEY6 0x3038
95#define REG_ENCR_XTS_KEY7 0x303c
96#define REG_AUTH_KEY0 0x3040
97#define REG_AUTH_KEY1 0x3044
98#define REG_AUTH_KEY2 0x3048
99#define REG_AUTH_KEY3 0x304c
100#define REG_AUTH_KEY4 0x3050
101#define REG_AUTH_KEY5 0x3054
102#define REG_AUTH_KEY6 0x3058
103#define REG_AUTH_KEY7 0x305c
104#define REG_AUTH_KEY8 0x3060
105#define REG_AUTH_KEY9 0x3064
106#define REG_AUTH_KEY10 0x3068
107#define REG_AUTH_KEY11 0x306c
108#define REG_AUTH_KEY12 0x3070
109#define REG_AUTH_KEY13 0x3074
110#define REG_AUTH_KEY14 0x3078
111#define REG_AUTH_KEY15 0x307c
112
113/* Register bits - REG_VERSION */
114#define CORE_STEP_REV_SHIFT 0
115#define CORE_STEP_REV_MASK GENMASK(15, 0)
116#define CORE_MINOR_REV_SHIFT 16
117#define CORE_MINOR_REV_MASK GENMASK(23, 16)
118#define CORE_MAJOR_REV_SHIFT 24
119#define CORE_MAJOR_REV_MASK GENMASK(31, 24)
120
121/* Register bits - REG_STATUS */
122#define MAC_FAILED_SHIFT 31
123#define DOUT_SIZE_AVAIL_SHIFT 26
124#define DOUT_SIZE_AVAIL_MASK GENMASK(30, 26)
125#define DIN_SIZE_AVAIL_SHIFT 21
126#define DIN_SIZE_AVAIL_MASK GENMASK(25, 21)
127#define HSD_ERR_SHIFT 20
128#define ACCESS_VIOL_SHIFT 19
129#define PIPE_ACTIVE_ERR_SHIFT 18
130#define CFG_CHNG_ERR_SHIFT 17
131#define DOUT_ERR_SHIFT 16
132#define DIN_ERR_SHIFT 15
133#define AXI_ERR_SHIFT 14
134#define CRYPTO_STATE_SHIFT 10
135#define CRYPTO_STATE_MASK GENMASK(13, 10)
136#define ENCR_BUSY_SHIFT 9
137#define AUTH_BUSY_SHIFT 8
138#define DOUT_INTR_SHIFT 7
139#define DIN_INTR_SHIFT 6
140#define OP_DONE_INTR_SHIFT 5
141#define ERR_INTR_SHIFT 4
142#define DOUT_RDY_SHIFT 3
143#define DIN_RDY_SHIFT 2
144#define OPERATION_DONE_SHIFT 1
145#define SW_ERR_SHIFT 0
146
147/* Register bits - REG_STATUS2 */
148#define AXI_EXTRA_SHIFT 1
149#define LOCKED_SHIFT 2
150
151/* Register bits - REG_CONFIG */
152#define REQ_SIZE_SHIFT 17
153#define REQ_SIZE_MASK GENMASK(20, 17)
154#define REQ_SIZE_ENUM_1_BEAT 0
155#define REQ_SIZE_ENUM_2_BEAT 1
156#define REQ_SIZE_ENUM_3_BEAT 2
157#define REQ_SIZE_ENUM_4_BEAT 3
158#define REQ_SIZE_ENUM_5_BEAT 4
159#define REQ_SIZE_ENUM_6_BEAT 5
160#define REQ_SIZE_ENUM_7_BEAT 6
161#define REQ_SIZE_ENUM_8_BEAT 7
162#define REQ_SIZE_ENUM_9_BEAT 8
163#define REQ_SIZE_ENUM_10_BEAT 9
164#define REQ_SIZE_ENUM_11_BEAT 10
165#define REQ_SIZE_ENUM_12_BEAT 11
166#define REQ_SIZE_ENUM_13_BEAT 12
167#define REQ_SIZE_ENUM_14_BEAT 13
168#define REQ_SIZE_ENUM_15_BEAT 14
169#define REQ_SIZE_ENUM_16_BEAT 15
170
171#define MAX_QUEUED_REQ_SHIFT 14
172#define MAX_QUEUED_REQ_MASK GENMASK(24, 16)
173#define ENUM_1_QUEUED_REQS 0
174#define ENUM_2_QUEUED_REQS 1
175#define ENUM_3_QUEUED_REQS 2
176
177#define IRQ_ENABLES_SHIFT 10
178#define IRQ_ENABLES_MASK GENMASK(13, 10)
179
180#define LITTLE_ENDIAN_MODE_SHIFT 9
181#define PIPE_SET_SELECT_SHIFT 5
182#define PIPE_SET_SELECT_MASK GENMASK(8, 5)
183
184#define HIGH_SPD_EN_N_SHIFT 4
185#define MASK_DOUT_INTR_SHIFT 3
186#define MASK_DIN_INTR_SHIFT 2
187#define MASK_OP_DONE_INTR_SHIFT 1
188#define MASK_ERR_INTR_SHIFT 0
189
190/* Register bits - REG_AUTH_SEG_CFG */
191#define COMP_EXP_MAC_SHIFT 24
192#define COMP_EXP_MAC_DISABLED 0
193#define COMP_EXP_MAC_ENABLED 1
194
195#define F9_DIRECTION_SHIFT 23
196#define F9_DIRECTION_UPLINK 0
197#define F9_DIRECTION_DOWNLINK 1
198
199#define AUTH_NONCE_NUM_WORDS_SHIFT 20
200#define AUTH_NONCE_NUM_WORDS_MASK GENMASK(22, 20)
201
202#define USE_PIPE_KEY_AUTH_SHIFT 19
203#define USE_HW_KEY_AUTH_SHIFT 18
204#define AUTH_FIRST_SHIFT 17
205#define AUTH_LAST_SHIFT 16
206
207#define AUTH_POS_SHIFT 14
208#define AUTH_POS_MASK GENMASK(15, 14)
209#define AUTH_POS_BEFORE 0
210#define AUTH_POS_AFTER 1
211
212#define AUTH_SIZE_SHIFT 9
213#define AUTH_SIZE_MASK GENMASK(13, 9)
214#define AUTH_SIZE_SHA1 0
215#define AUTH_SIZE_SHA256 1
216#define AUTH_SIZE_ENUM_1_BYTES 0
217#define AUTH_SIZE_ENUM_2_BYTES 1
218#define AUTH_SIZE_ENUM_3_BYTES 2
219#define AUTH_SIZE_ENUM_4_BYTES 3
220#define AUTH_SIZE_ENUM_5_BYTES 4
221#define AUTH_SIZE_ENUM_6_BYTES 5
222#define AUTH_SIZE_ENUM_7_BYTES 6
223#define AUTH_SIZE_ENUM_8_BYTES 7
224#define AUTH_SIZE_ENUM_9_BYTES 8
225#define AUTH_SIZE_ENUM_10_BYTES 9
226#define AUTH_SIZE_ENUM_11_BYTES 10
227#define AUTH_SIZE_ENUM_12_BYTES 11
228#define AUTH_SIZE_ENUM_13_BYTES 12
229#define AUTH_SIZE_ENUM_14_BYTES 13
230#define AUTH_SIZE_ENUM_15_BYTES 14
231#define AUTH_SIZE_ENUM_16_BYTES 15
232
233#define AUTH_MODE_SHIFT 6
234#define AUTH_MODE_MASK GENMASK(8, 6)
235#define AUTH_MODE_HASH 0
236#define AUTH_MODE_HMAC 1
237#define AUTH_MODE_CCM 0
238#define AUTH_MODE_CMAC 1
239
240#define AUTH_KEY_SIZE_SHIFT 3
241#define AUTH_KEY_SIZE_MASK GENMASK(5, 3)
242#define AUTH_KEY_SZ_AES128 0
243#define AUTH_KEY_SZ_AES256 2
244
245#define AUTH_ALG_SHIFT 0
246#define AUTH_ALG_MASK GENMASK(2, 0)
247#define AUTH_ALG_NONE 0
248#define AUTH_ALG_SHA 1
249#define AUTH_ALG_AES 2
250#define AUTH_ALG_KASUMI 3
251#define AUTH_ALG_SNOW3G 4
252#define AUTH_ALG_ZUC 5
253
254/* Register bits - REG_ENCR_XTS_DU_SIZE */
255#define ENCR_XTS_DU_SIZE_SHIFT 0
256#define ENCR_XTS_DU_SIZE_MASK GENMASK(19, 0)
257
258/* Register bits - REG_ENCR_SEG_CFG */
259#define F8_KEYSTREAM_ENABLE_SHIFT 17
260#define F8_KEYSTREAM_DISABLED 0
261#define F8_KEYSTREAM_ENABLED 1
262
263#define F8_DIRECTION_SHIFT 16
264#define F8_DIRECTION_UPLINK 0
265#define F8_DIRECTION_DOWNLINK 1
266
267#define USE_PIPE_KEY_ENCR_SHIFT 15
268#define USE_PIPE_KEY_ENCR_ENABLED 1
269#define USE_KEY_REGISTERS 0
270
271#define USE_HW_KEY_ENCR_SHIFT 14
272#define USE_KEY_REG 0
273#define USE_HW_KEY 1
274
275#define LAST_CCM_SHIFT 13
276#define LAST_CCM_XFR 1
277#define INTERM_CCM_XFR 0
278
279#define CNTR_ALG_SHIFT 11
280#define CNTR_ALG_MASK GENMASK(12, 11)
281#define CNTR_ALG_NIST 0
282
283#define ENCODE_SHIFT 10
284
285#define ENCR_MODE_SHIFT 6
286#define ENCR_MODE_MASK GENMASK(9, 6)
287#define ENCR_MODE_ECB 0
288#define ENCR_MODE_CBC 1
289#define ENCR_MODE_CTR 2
290#define ENCR_MODE_XTS 3
291#define ENCR_MODE_CCM 4
292
293#define ENCR_KEY_SZ_SHIFT 3
294#define ENCR_KEY_SZ_MASK GENMASK(5, 3)
295#define ENCR_KEY_SZ_DES 0
296#define ENCR_KEY_SZ_3DES 1
297#define ENCR_KEY_SZ_AES128 0
298#define ENCR_KEY_SZ_AES256 2
299
300#define ENCR_ALG_SHIFT 0
301#define ENCR_ALG_MASK GENMASK(2, 0)
302#define ENCR_ALG_NONE 0
303#define ENCR_ALG_DES 1
304#define ENCR_ALG_AES 2
305#define ENCR_ALG_KASUMI 4
306#define ENCR_ALG_SNOW_3G 5
307#define ENCR_ALG_ZUC 6
308
309/* Register bits - REG_GOPROC */
310#define GO_SHIFT 0
311#define CLR_CNTXT_SHIFT 1
312#define RESULTS_DUMP_SHIFT 2
313
314/* Register bits - REG_ENGINES_AVAIL */
315#define ENCR_AES_SEL_SHIFT 0
316#define DES_SEL_SHIFT 1
317#define ENCR_SNOW3G_SEL_SHIFT 2
318#define ENCR_KASUMI_SEL_SHIFT 3
319#define SHA_SEL_SHIFT 4
320#define SHA512_SEL_SHIFT 5
321#define AUTH_AES_SEL_SHIFT 6
322#define AUTH_SNOW3G_SEL_SHIFT 7
323#define AUTH_KASUMI_SEL_SHIFT 8
324#define BAM_PIPE_SETS_SHIFT 9
325#define BAM_PIPE_SETS_MASK GENMASK(12, 9)
326#define AXI_WR_BEATS_SHIFT 13
327#define AXI_WR_BEATS_MASK GENMASK(18, 13)
328#define AXI_RD_BEATS_SHIFT 19
329#define AXI_RD_BEATS_MASK GENMASK(24, 19)
330#define ENCR_ZUC_SEL_SHIFT 26
331#define AUTH_ZUC_SEL_SHIFT 27
332#define ZUC_ENABLE_SHIFT 28
333
334#endif /* _REGS_V5_H_ */
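
Not part of the patch: the SHIFT/MASK pairs above are intended to be combined into whole register words by the hardware setup code in common.c. As a minimal, hypothetical sketch (assuming this header plus <linux/types.h> and <linux/bitops.h>), an AES-128-CBC encryption segment configuration could be composed as below; the function name is made up for illustration.

static u32 example_encr_seg_cfg(void)
{
	u32 cfg = 0;

	/* cipher algorithm, key size and mode, using the enums above */
	cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
	cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;

	/* keep only the bits covered by the corresponding masks */
	cfg &= ENCR_ALG_MASK | ENCR_KEY_SZ_MASK | ENCR_MODE_MASK;

	return cfg;
}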
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
new file mode 100644
index 000000000000..3c33ac9c8cba
--- /dev/null
+++ b/drivers/crypto/qce/sha.c
@@ -0,0 +1,588 @@
1/*
2 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/device.h>
15#include <linux/interrupt.h>
16#include <crypto/internal/hash.h>
17
18#include "common.h"
19#include "core.h"
20#include "sha.h"
21
22/* crypto hw padding constant for first operation */
23#define SHA_PADDING 64
24#define SHA_PADDING_MASK (SHA_PADDING - 1)
25
26static LIST_HEAD(ahash_algs);
27
28static const __be32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
29 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
30};
31
32static const __be32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
33 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
34 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
35};
36
37static void qce_ahash_done(void *data)
38{
39 struct crypto_async_request *async_req = data;
40 struct ahash_request *req = ahash_request_cast(async_req);
41 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
42 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
43 struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
44 struct qce_device *qce = tmpl->qce;
45 struct qce_result_dump *result = qce->dma.result_buf;
46 unsigned int digestsize = crypto_ahash_digestsize(ahash);
47 int error;
48 u32 status;
49
50 error = qce_dma_terminate_all(&qce->dma);
51 if (error)
52 dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
53
54 qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
55 rctx->src_chained);
56 qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
57
58 memcpy(rctx->digest, result->auth_iv, digestsize);
59 if (req->result)
60 memcpy(req->result, result->auth_iv, digestsize);
61
62 rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
63 rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);
64
65 error = qce_check_status(qce, &status);
66 if (error < 0)
67 dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
68
69 req->src = rctx->src_orig;
70 req->nbytes = rctx->nbytes_orig;
71 rctx->last_blk = false;
72 rctx->first_blk = false;
73
74 qce->async_req_done(tmpl->qce, error);
75}
76
77static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
78{
79 struct ahash_request *req = ahash_request_cast(async_req);
80 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
81 struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
82 struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
83 struct qce_device *qce = tmpl->qce;
84 unsigned long flags = rctx->flags;
85 int ret;
86
87 if (IS_SHA_HMAC(flags)) {
88 rctx->authkey = ctx->authkey;
89 rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
90 } else if (IS_CMAC(flags)) {
91 rctx->authkey = ctx->authkey;
92 rctx->authklen = AES_KEYSIZE_128;
93 }
94
95 rctx->src_nents = qce_countsg(req->src, req->nbytes,
96 &rctx->src_chained);
97 ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
98 rctx->src_chained);
99 if (ret < 0)
100 return ret;
101
102 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
103
104 ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
105 if (ret < 0)
106 goto error_unmap_src;
107
108 ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
109 &rctx->result_sg, 1, qce_ahash_done, async_req);
110 if (ret)
111 goto error_unmap_dst;
112
113 qce_dma_issue_pending(&qce->dma);
114
115 ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
116 if (ret)
117 goto error_terminate;
118
119 return 0;
120
121error_terminate:
122 qce_dma_terminate_all(&qce->dma);
123error_unmap_dst:
124 qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
125error_unmap_src:
126 qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
127 rctx->src_chained);
128 return ret;
129}
130
131static int qce_ahash_init(struct ahash_request *req)
132{
133 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
134 struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
135 const __be32 *std_iv = tmpl->std_iv;
136
137 memset(rctx, 0, sizeof(*rctx));
138 rctx->first_blk = true;
139 rctx->last_blk = false;
140 rctx->flags = tmpl->alg_flags;
141 memcpy(rctx->digest, std_iv, sizeof(rctx->digest));
142
143 return 0;
144}
145
146static int qce_ahash_export(struct ahash_request *req, void *out)
147{
148 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
149 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
150 unsigned long flags = rctx->flags;
151 unsigned int digestsize = crypto_ahash_digestsize(ahash);
152 unsigned int blocksize =
153 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
154
155 if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
156 struct sha1_state *out_state = out;
157
158 out_state->count = rctx->count;
159 qce_cpu_to_be32p_array(out_state->state, rctx->digest,
160 digestsize);
161 memcpy(out_state->buffer, rctx->buf, blocksize);
162 } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
163 struct sha256_state *out_state = out;
164
165 out_state->count = rctx->count;
166 qce_cpu_to_be32p_array(out_state->state, rctx->digest,
167 digestsize);
168 memcpy(out_state->buf, rctx->buf, blocksize);
169 } else {
170 return -EINVAL;
171 }
172
173 return 0;
174}
175
176static int qce_import_common(struct ahash_request *req, u64 in_count,
177 const u32 *state, const u8 *buffer, bool hmac)
178{
179 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
180 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
181 unsigned int digestsize = crypto_ahash_digestsize(ahash);
182 unsigned int blocksize;
183 u64 count = in_count;
184
185 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
186 rctx->count = in_count;
187 memcpy(rctx->buf, buffer, blocksize);
188
189 if (in_count <= blocksize) {
190 rctx->first_blk = 1;
191 } else {
192 rctx->first_blk = 0;
193 /*
194 * For HMAC, the hardware performs padding when the first block
195 * flag is set. Therefore the byte_count must be incremented by 64
196 * after the first block operation.
197 */
198 if (hmac)
199 count += SHA_PADDING;
200 }
201
202 rctx->byte_count[0] = (__be32)(count & ~SHA_PADDING_MASK);
203 rctx->byte_count[1] = (__be32)(count >> 32);
204 qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
205 digestsize);
206 rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
207
208 return 0;
209}
210
211static int qce_ahash_import(struct ahash_request *req, const void *in)
212{
213 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
214 unsigned long flags = rctx->flags;
215 bool hmac = IS_SHA_HMAC(flags);
216 int ret = -EINVAL;
217
218 if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
219 const struct sha1_state *state = in;
220
221 ret = qce_import_common(req, state->count, state->state,
222 state->buffer, hmac);
223 } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
224 const struct sha256_state *state = in;
225
226 ret = qce_import_common(req, state->count, state->state,
227 state->buf, hmac);
228 }
229
230 return ret;
231}
232
233static int qce_ahash_update(struct ahash_request *req)
234{
235 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
236 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
237 struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
238 struct qce_device *qce = tmpl->qce;
239 struct scatterlist *sg_last, *sg;
240 unsigned int total, len;
241 unsigned int hash_later;
242 unsigned int nbytes;
243 unsigned int blocksize;
244
245 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
246 rctx->count += req->nbytes;
247
248 /* check for data left over from previous updates and append the new data to it */
249 total = req->nbytes + rctx->buflen;
250
251 if (total <= blocksize) {
252 scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
253 0, req->nbytes, 0);
254 rctx->buflen += req->nbytes;
255 return 0;
256 }
257
258 /* save the original req structure fields */
259 rctx->src_orig = req->src;
260 rctx->nbytes_orig = req->nbytes;
261
262 /*
263 * If we have data from a previous update, copy it into the temporary
264 * buffer. The old data will be combined with the current request bytes.
265 */
266 if (rctx->buflen)
267 memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
268
269 /* calculate how many bytes will be hashed later */
270 hash_later = total % blocksize;
271 if (hash_later) {
272 unsigned int src_offset = req->nbytes - hash_later;
273 scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
274 hash_later, 0);
275 }
276
277 /* here nbytes is a multiple of blocksize */
278 nbytes = total - hash_later;
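	/*
	 * Illustrative example (not in the original patch): with a 64-byte
	 * block size, 10 bytes carried over from a previous update and a
	 * 100-byte request, total is 110, hash_later is 46 and nbytes
	 * (hashed in this pass) is 64.
	 */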
279
280 len = rctx->buflen;
281 sg = sg_last = req->src;
282
283 while (len < nbytes && sg) {
284 if (len + sg_dma_len(sg) > nbytes)
285 break;
286 len += sg_dma_len(sg);
287 sg_last = sg;
288 sg = scatterwalk_sg_next(sg);
289 }
290
291 if (!sg_last)
292 return -EINVAL;
293
294 sg_mark_end(sg_last);
295
296 if (rctx->buflen) {
297 sg_init_table(rctx->sg, 2);
298 sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
299 scatterwalk_sg_chain(rctx->sg, 2, req->src);
300 req->src = rctx->sg;
301 }
302
303 req->nbytes = nbytes;
304 rctx->buflen = hash_later;
305
306 return qce->async_req_enqueue(tmpl->qce, &req->base);
307}
308
309static int qce_ahash_final(struct ahash_request *req)
310{
311 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
312 struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
313 struct qce_device *qce = tmpl->qce;
314
315 if (!rctx->buflen)
316 return 0;
317
318 rctx->last_blk = true;
319
320 rctx->src_orig = req->src;
321 rctx->nbytes_orig = req->nbytes;
322
323 memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
324 sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);
325
326 req->src = rctx->sg;
327 req->nbytes = rctx->buflen;
328
329 return qce->async_req_enqueue(tmpl->qce, &req->base);
330}
331
332static int qce_ahash_digest(struct ahash_request *req)
333{
334 struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
335 struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
336 struct qce_device *qce = tmpl->qce;
337 int ret;
338
339 ret = qce_ahash_init(req);
340 if (ret)
341 return ret;
342
343 rctx->src_orig = req->src;
344 rctx->nbytes_orig = req->nbytes;
345 rctx->first_blk = true;
346 rctx->last_blk = true;
347
348 return qce->async_req_enqueue(tmpl->qce, &req->base);
349}
350
351struct qce_ahash_result {
352 struct completion completion;
353 int error;
354};
355
356static void qce_digest_complete(struct crypto_async_request *req, int error)
357{
358 struct qce_ahash_result *result = req->data;
359
360 if (error == -EINPROGRESS)
361 return;
362
363 result->error = error;
364 complete(&result->completion);
365}
366
367static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
368 unsigned int keylen)
369{
370 unsigned int digestsize = crypto_ahash_digestsize(tfm);
371 struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
372 struct qce_ahash_result result;
373 struct ahash_request *req;
374 struct scatterlist sg;
375 unsigned int blocksize;
376 struct crypto_ahash *ahash_tfm;
377 u8 *buf;
378 int ret;
379 const char *alg_name;
380
381 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
382 memset(ctx->authkey, 0, sizeof(ctx->authkey));
383
384 if (keylen <= blocksize) {
385 memcpy(ctx->authkey, key, keylen);
386 return 0;
387 }
388
389 if (digestsize == SHA1_DIGEST_SIZE)
390 alg_name = "sha1-qce";
391 else if (digestsize == SHA256_DIGEST_SIZE)
392 alg_name = "sha256-qce";
393 else
394 return -EINVAL;
395
396 ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
397 CRYPTO_ALG_TYPE_AHASH_MASK);
398 if (IS_ERR(ahash_tfm))
399 return PTR_ERR(ahash_tfm);
400
401 req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
402 if (!req) {
403 ret = -ENOMEM;
404 goto err_free_ahash;
405 }
406
407 init_completion(&result.completion);
408 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
409 qce_digest_complete, &result);
410 crypto_ahash_clear_flags(ahash_tfm, ~0);
411
412 buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
413 if (!buf) {
414 ret = -ENOMEM;
415 goto err_free_req;
416 }
417
418 memcpy(buf, key, keylen);
419 sg_init_one(&sg, buf, keylen);
420 ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
421
422 ret = crypto_ahash_digest(req);
423 if (ret == -EINPROGRESS || ret == -EBUSY) {
424 ret = wait_for_completion_interruptible(&result.completion);
425 if (!ret)
426 ret = result.error;
427 }
428
429 if (ret)
430 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
431
432 kfree(buf);
433err_free_req:
434 ahash_request_free(req);
435err_free_ahash:
436 crypto_free_ahash(ahash_tfm);
437 return ret;
438}
439
440static int qce_ahash_cra_init(struct crypto_tfm *tfm)
441{
442 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
443 struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);
444
445 crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
446 memset(ctx, 0, sizeof(*ctx));
447 return 0;
448}
449
450struct qce_ahash_def {
451 unsigned long flags;
452 const char *name;
453 const char *drv_name;
454 unsigned int digestsize;
455 unsigned int blocksize;
456 unsigned int statesize;
457 const __be32 *std_iv;
458};
459
460static const struct qce_ahash_def ahash_def[] = {
461 {
462 .flags = QCE_HASH_SHA1,
463 .name = "sha1",
464 .drv_name = "sha1-qce",
465 .digestsize = SHA1_DIGEST_SIZE,
466 .blocksize = SHA1_BLOCK_SIZE,
467 .statesize = sizeof(struct sha1_state),
468 .std_iv = std_iv_sha1,
469 },
470 {
471 .flags = QCE_HASH_SHA256,
472 .name = "sha256",
473 .drv_name = "sha256-qce",
474 .digestsize = SHA256_DIGEST_SIZE,
475 .blocksize = SHA256_BLOCK_SIZE,
476 .statesize = sizeof(struct sha256_state),
477 .std_iv = std_iv_sha256,
478 },
479 {
480 .flags = QCE_HASH_SHA1_HMAC,
481 .name = "hmac(sha1)",
482 .drv_name = "hmac-sha1-qce",
483 .digestsize = SHA1_DIGEST_SIZE,
484 .blocksize = SHA1_BLOCK_SIZE,
485 .statesize = sizeof(struct sha1_state),
486 .std_iv = std_iv_sha1,
487 },
488 {
489 .flags = QCE_HASH_SHA256_HMAC,
490 .name = "hmac(sha256)",
491 .drv_name = "hmac-sha256-qce",
492 .digestsize = SHA256_DIGEST_SIZE,
493 .blocksize = SHA256_BLOCK_SIZE,
494 .statesize = sizeof(struct sha256_state),
495 .std_iv = std_iv_sha256,
496 },
497};
498
499static int qce_ahash_register_one(const struct qce_ahash_def *def,
500 struct qce_device *qce)
501{
502 struct qce_alg_template *tmpl;
503 struct ahash_alg *alg;
504 struct crypto_alg *base;
505 int ret;
506
507 tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
508 if (!tmpl)
509 return -ENOMEM;
510
511 tmpl->std_iv = def->std_iv;
512
513 alg = &tmpl->alg.ahash;
514 alg->init = qce_ahash_init;
515 alg->update = qce_ahash_update;
516 alg->final = qce_ahash_final;
517 alg->digest = qce_ahash_digest;
518 alg->export = qce_ahash_export;
519 alg->import = qce_ahash_import;
520 if (IS_SHA_HMAC(def->flags))
521 alg->setkey = qce_ahash_hmac_setkey;
522 alg->halg.digestsize = def->digestsize;
523 alg->halg.statesize = def->statesize;
524
525 base = &alg->halg.base;
526 base->cra_blocksize = def->blocksize;
527 base->cra_priority = 300;
528 base->cra_flags = CRYPTO_ALG_ASYNC;
529 base->cra_ctxsize = sizeof(struct qce_sha_ctx);
530 base->cra_alignmask = 0;
531 base->cra_module = THIS_MODULE;
532 base->cra_init = qce_ahash_cra_init;
533 INIT_LIST_HEAD(&base->cra_list);
534
535 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
536 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
537 def->drv_name);
538
539 INIT_LIST_HEAD(&tmpl->entry);
540 tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
541 tmpl->alg_flags = def->flags;
542 tmpl->qce = qce;
543
544 ret = crypto_register_ahash(alg);
545 if (ret) {
546 kfree(tmpl);
547 dev_err(qce->dev, "%s registration failed\n", base->cra_name);
548 return ret;
549 }
550
551 list_add_tail(&tmpl->entry, &ahash_algs);
552 dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
553 return 0;
554}
555
556static void qce_ahash_unregister(struct qce_device *qce)
557{
558 struct qce_alg_template *tmpl, *n;
559
560 list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
561 crypto_unregister_ahash(&tmpl->alg.ahash);
562 list_del(&tmpl->entry);
563 kfree(tmpl);
564 }
565}
566
567static int qce_ahash_register(struct qce_device *qce)
568{
569 int ret, i;
570
571 for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
572 ret = qce_ahash_register_one(&ahash_def[i], qce);
573 if (ret)
574 goto err;
575 }
576
577 return 0;
578err:
579 qce_ahash_unregister(qce);
580 return ret;
581}
582
583const struct qce_algo_ops ahash_ops = {
584 .type = CRYPTO_ALG_TYPE_AHASH,
585 .register_algs = qce_ahash_register,
586 .unregister_algs = qce_ahash_unregister,
587 .async_req_handle = qce_ahash_async_req_handle,
588};
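
For reference only, not part of the patch: once these transforms are registered, in-kernel users reach them through the generic ahash API rather than by calling the functions above directly. A hypothetical sketch of a caller computing a SHA-256 digest follows, reusing the same completion pattern as qce_ahash_hmac_setkey(); the example_* names are invented for illustration, the data buffer is assumed to be kmalloc'ed (so it is DMA-able), and the crypto core may or may not pick sha256-qce depending on the priorities of the implementations present.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_result {
	struct completion completion;
	int error;
};

static void example_complete(struct crypto_async_request *req, int error)
{
	struct example_result *res = req->data;

	if (error == -EINPROGRESS)
		return;

	res->error = error;
	complete(&res->completion);
}

static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct example_result result;
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_complete, &result);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&result.completion);
		ret = result.error;
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}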
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
new file mode 100644
index 000000000000..286f0d5397f3
--- /dev/null
+++ b/drivers/crypto/qce/sha.h
@@ -0,0 +1,81 @@
1/*
2 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _SHA_H_
15#define _SHA_H_
16
17#include <crypto/scatterwalk.h>
18#include <crypto/sha.h>
19
20#include "common.h"
21#include "core.h"
22
23#define QCE_SHA_MAX_BLOCKSIZE SHA256_BLOCK_SIZE
24#define QCE_SHA_MAX_DIGESTSIZE SHA256_DIGEST_SIZE
25
26struct qce_sha_ctx {
27 u8 authkey[QCE_SHA_MAX_BLOCKSIZE];
28};
29
30/**
31 * struct qce_sha_reqctx - holds private ahash objects per request
32 * @buf: used during update, import and export
33 * @tmpbuf: buffer for internal use
34 * @digest: calculated digest buffer
35 * @buflen: length of the buffer
36 * @flags: operation flags
37 * @src_orig: original request sg list
38 * @nbytes_orig: original request number of bytes
39 * @src_chained: is source scatterlist chained
40 * @src_nents: source number of entries
41 * @byte_count: hardware authentication byte count
42 * @count: byte count saved in the hash state during update, import and export
43 * @first_blk: is it the first block
44 * @last_blk: is it the last block
45 * @sg: used to chain sg lists
46 * @authkey: pointer to auth key in sha ctx
47 * @authklen: auth key length
48 * @result_sg: scatterlist used for result buffer
49 */
50struct qce_sha_reqctx {
51 u8 buf[QCE_SHA_MAX_BLOCKSIZE];
52 u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE];
53 u8 digest[QCE_SHA_MAX_DIGESTSIZE];
54 unsigned int buflen;
55 unsigned long flags;
56 struct scatterlist *src_orig;
57 unsigned int nbytes_orig;
58 bool src_chained;
59 int src_nents;
60 __be32 byte_count[2];
61 u64 count;
62 bool first_blk;
63 bool last_blk;
64 struct scatterlist sg[2];
65 u8 *authkey;
66 unsigned int authklen;
67 struct scatterlist result_sg;
68};
69
70static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
71{
72 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
73 struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
74 struct ahash_alg, halg);
75
76 return container_of(alg, struct qce_alg_template, alg.ahash);
77}
78
79extern const struct qce_algo_ops ahash_ops;
80
81#endif /* _SHA_H_ */