author    Stanimir Varbanov <svarbanov@mm-sol.com>  2014-06-25 12:28:57 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>  2014-07-03 09:40:27 -0400
commit    ec8f5d8f6f76b939f662d6e83041abecabef0a34 (patch)
tree      507b8aff2cec7d3f5b2a4b1f94aa3da9a2d3fd04 /drivers/crypto/qce/ablkcipher.c
parent    002c77a48b479b094b834b02ef78be47ceac76fd (diff)
crypto: qce - Qualcomm crypto engine driver
The driver is separated into functional parts:

- The core part implements the platform driver probe and remove
  callbacks. The probe enables clocks, checks the crypto version,
  initializes and requests DMA channels, creates the done tasklet,
  initializes the crypto queue and finally registers the algorithms
  with the crypto core subsystem.
- DMA and SG helper functions implement the dmaengine and sg-list
  helpers used by the other parts of the crypto driver.
- ablkcipher algorithms: the AES, DES and 3DES crypto API callbacks,
  the crypto alg registration, the async request handler and its DMA
  done callback function.
- SHA and HMAC transforms: implementation and registration of the
  ahash crypto type, covering sha1, sha256, hmac(sha1) and
  hmac(sha256).
- Infrastructure to set up the crypto hardware: functions used to set
  up/prepare the hardware registers for all algorithms supported by
  the crypto block. It also exports a few helper functions needed by
  the algorithms:
    - to check the hardware status
    - to start the crypto hardware
    - to translate a data stream to big endian form

The register addresses and bit masks used by the driver are added as
well.

Signed-off-by: Stanimir Varbanov <svarbanov@mm-sol.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
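For context, not part of this patch: once registered, the ciphers are
reachable through the generic ablkcipher API under the names listed in
ablkcipher_def below. A minimal kernel-side consumer might look like
this sketch (hypothetical code assuming <linux/crypto.h> and
<linux/scatterlist.h>; key, iv, buf, my_complete and my_ctx are
placeholders and error handling is trimmed; since this driver is
asynchronous, encrypt typically returns -EINPROGRESS and completion
arrives via the callback):

	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int err;

	/* the crypto core may resolve this to cbc-aes-qce by priority */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					my_complete, my_ctx);
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	err = crypto_ablkcipher_encrypt(req);
	/* on -EINPROGRESS, wait for my_complete() before freeing req */

	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);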
Diffstat (limited to 'drivers/crypto/qce/ablkcipher.c')
-rw-r--r--  drivers/crypto/qce/ablkcipher.c  |  431
1 file changed, 431 insertions(+), 0 deletions(-)
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
new file mode 100644
index 000000000000..ad592de475a4
--- /dev/null
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -0,0 +1,431 @@
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>

#include "cipher.h"

static LIST_HEAD(ablkcipher_algs);

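/*
 * DMA completion callback: stop the DMA engine, undo the scatterlist
 * mappings and the destination sg table built by the request handler,
 * check the hardware status and complete the async request.
 */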
static void qce_ablkcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
			error);
	if (diff_dst)
		qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
			    rctx->src_chained);
	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
		    rctx->dst_chained);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

	qce->async_req_done(tmpl->qce, error);
}

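/*
 * Prepare and run one request: build a destination sg table with the
 * engine's result buffer appended, map both scatterlists for DMA,
 * program the transfers and start the crypto hardware.
 */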
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = qce_countsg(req->src, req->nbytes,
				      &rctx->src_chained);
	if (diff_dst) {
		rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
					      &rctx->dst_chained);
	} else {
		rctx->dst_nents = rctx->src_nents;
		rctx->dst_chained = rctx->src_chained;
	}

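	/* one extra destination entry for the engine's result buffer */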
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
			rctx->dst_chained);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
				rctx->src_chained);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
			    rctx->src_chained);
error_unmap_dst:
	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
		    rctx->dst_chained);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

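/*
 * Set the encryption key. AES key sizes the hardware cannot handle
 * (anything but 128 and 256 bits) are passed to the software fallback;
 * DES keys are checked for weakness when the tfm requests it.
 */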
static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	if (IS_AES(flags)) {
		switch (keylen) {
		case AES_KEYSIZE_128:
		case AES_KEYSIZE_256:
			break;
		default:
			goto fallback;
		}
	} else if (IS_DES(flags)) {
		u32 tmp[DES_EXPKEY_WORDS];

		ret = des_ekey(tmp, key);
		if (!ret && crypto_ablkcipher_get_flags(ablk) &
		    CRYPTO_TFM_REQ_WEAK_KEY)
			goto weakkey;
	}

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
fallback:
	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
weakkey:
	crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
	return -EINVAL;
}

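/*
 * Common encrypt/decrypt path: AES requests whose key size the engine
 * does not support (e.g. AES-192) are handed to the software fallback,
 * all others are enqueued for the crypto engine.
 */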
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_tfm *tfm =
			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
	    ctx->enc_keylen != AES_KEYSIZE_256) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		ret = encrypt ? crypto_ablkcipher_encrypt(req) :
				crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}

static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}

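/*
 * Allocate a software fallback transform of the same cipher for key
 * sizes the hardware cannot handle.
 */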
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

	ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
						CRYPTO_ALG_TYPE_ABLKCIPHER,
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	return 0;
}

static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(ctx->fallback);
}

struct qce_ablkcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

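/* ablkcipher algorithms supported by the crypto engine */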
static const struct qce_ablkcipher_def ablkcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};

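/*
 * Fill a crypto_alg from an ablkcipher_def template, register it with
 * the crypto core and keep it on a local list for later unregistering.
 */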
static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct crypto_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.crypto;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->cra_blocksize = def->blocksize;
	alg->cra_ablkcipher.ivsize = def->ivsize;
	alg->cra_ablkcipher.min_keysize = def->min_keysize;
	alg->cra_ablkcipher.max_keysize = def->max_keysize;
	alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

	alg->cra_priority = 300;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->cra_alignmask = 0;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_module = THIS_MODULE;
	alg->cra_init = qce_ablkcipher_init;
	alg->cra_exit = qce_ablkcipher_exit;
	INIT_LIST_HEAD(&alg->cra_list);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_alg(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ablkcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
	return 0;
}

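/* Unregister and free every algorithm registered above */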
static void qce_ablkcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
		crypto_unregister_alg(&tmpl->alg.crypto);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ablkcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
		ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ablkcipher_unregister(qce);
	return ret;
}

const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};