Diffstat (limited to 'drivers/crypto/qce/sha.c')
 -rw-r--r--  drivers/crypto/qce/sha.c | 588
 1 file changed, 588 insertions, 0 deletions

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
new file mode 100644
index 000000000000..3c33ac9c8cba
--- /dev/null
+++ b/drivers/crypto/qce/sha.c
@@ -0,0 +1,588 @@
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>

#include "common.h"
#include "core.h"
#include "sha.h"

/* crypto hw padding constant for first operation */
#define SHA_PADDING		64
#define SHA_PADDING_MASK	(SHA_PADDING - 1)

static LIST_HEAD(ahash_algs);

static const __be32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const __be32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};

static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
		    rctx->src_chained);
	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}

static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = qce_countsg(req->src, req->nbytes,
				      &rctx->src_chained);
	ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
			rctx->src_chained);
	if (ret < 0)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
	if (ret < 0)
		goto error_unmap_src;

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
error_unmap_src:
	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
		    rctx->src_chained);
	return ret;
}
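
/*
 * Annotation, not in the original file: summary of the request path
 * above. The source scatterlist is DMA-mapped towards the device, the
 * shared result buffer is mapped from the device, both are handed to
 * the DMA engine with qce_ahash_done() as the completion callback, and
 * qce_start() then programs the crypto engine. On failure, each step
 * is unwound in reverse order via the error_* labels.
 */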

static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const __be32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}

static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array(out_state->state, rctx->digest,
				       digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array(out_state->state, rctx->digest,
				       digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}

static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, the hardware adds padding when the first
		 * block is processed. Therefore the byte count must be
		 * incremented by 64 after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}
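
/*
 * Illustrative sketch, not part of the driver: how the byte counters
 * above come out for a hypothetical imported count of 200 bytes on a
 * 64-byte block algorithm with HMAC. Kept under #if 0 so it is never
 * compiled; the names are local to the example.
 */
#if 0
static void qce_byte_count_example(void)
{
	u64 count = 200;	/* imported message length, > blocksize */
	u32 lo, hi;

	count += SHA_PADDING;			/* HMAC: hw padded block 1 */
	lo = (u32)(count & ~SHA_PADDING_MASK);	/* 264 & ~63 == 256 */
	hi = (u32)(count >> 32);		/* 0 for small counts */
	pr_info("byte_count = { %u, %u }, buffered = %u\n",
		lo, hi, (u32)(200 & (SHA_PADDING - 1)));	/* 8 bytes */
}
#endif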

static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	bool hmac = IS_SHA_HMAC(flags);
	int ret = -EINVAL;

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		const struct sha1_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buffer, hmac);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		const struct sha256_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buf, hmac);
	}

	return ret;
}
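
/*
 * Illustrative usage sketch, not part of the driver: a caller that
 * snapshots a partial hash and resumes it later goes through the
 * generic ahash API, which lands in qce_ahash_export()/_import()
 * above. Assumes the request's tfm is one of the SHA-256 instances,
 * so sizeof(struct sha256_state) bounds the state blob.
 */
#if 0
static int qce_export_import_example(struct ahash_request *req)
{
	u8 state[sizeof(struct sha256_state)];
	int ret;

	ret = crypto_ahash_export(req, state);	/* snapshot mid-stream */
	if (ret)
		return ret;

	return crypto_ahash_import(req, state);	/* resume from snapshot */
}
#endif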

static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffered data from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * If we have data from a previous update, copy it into tmpbuf.
	 * The old data will be combined with the current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;
	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;

		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is a multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = scatterwalk_sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	sg_mark_end(sg_last);

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		scatterwalk_sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
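
/*
 * Illustrative sketch, not part of the driver: the update path above
 * only submits whole blocks to the engine and carries the remainder in
 * rctx->buf. The helper below merely restates that arithmetic; the
 * names are local to the example and it is never compiled.
 */
#if 0
static unsigned int qce_update_split_example(unsigned int buflen,
					     unsigned int nbytes,
					     unsigned int blocksize)
{
	unsigned int total = buflen + nbytes;
	unsigned int hash_later = total % blocksize;

	/*
	 * e.g. buflen = 10, nbytes = 100, blocksize = 64:
	 * total = 110, hash_later = 46, so 64 bytes go to the hardware
	 * now and 46 wait for the next update/final.
	 */
	return total - hash_later;
}
#endif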

static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen)
		return 0;

	rctx->last_blk = true;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

struct qce_ahash_result {
	struct completion completion;
	int error;
};

static void qce_digest_complete(struct crypto_async_request *req, int error)
{
	struct qce_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
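
/*
 * Illustrative usage sketch, not part of the driver: a one-shot digest
 * through the async interface, reusing the completion pattern of
 * qce_ahash_result/qce_digest_complete above. The caller and its
 * pre-built scatterlist are assumptions of the example.
 */
#if 0
static int qce_digest_example(struct crypto_ahash *tfm,
			      struct scatterlist *sg, unsigned int len,
			      u8 *out)
{
	struct qce_ahash_result result;
	struct ahash_request *req;
	int ret;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	ahash_request_set_crypt(req, sg, out, len);

	ret = crypto_ahash_digest(req);	/* ends up in qce_ahash_digest() */
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&result.completion);
		ret = result.error;
	}

	ahash_request_free(req);
	return ret;
}
#endif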

static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct qce_ahash_result result;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
				       CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.error;
	}

	if (ret)
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}
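
/*
 * Illustrative usage sketch, not part of the driver: callers set an
 * HMAC key through the generic API; keys longer than the block size
 * take the digest fallback path in qce_ahash_hmac_setkey() above.
 * The key bytes below are placeholders.
 */
#if 0
static int qce_setkey_example(struct crypto_ahash *hmac_tfm)
{
	static const u8 key[20] = { 0x0b, 0x0b, 0x0b };	/* placeholder */

	return crypto_ahash_setkey(hmac_tfm, key, sizeof(key));
}
#endif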

static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}

struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const __be32 *std_iv;
};

static const struct qce_ahash_def ahash_def[] = {
	{
		.flags = QCE_HASH_SHA1,
		.name = "sha1",
		.drv_name = "sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct sha1_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256,
		.name = "sha256",
		.drv_name = "sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct sha256_state),
		.std_iv = std_iv_sha256,
	},
	{
		.flags = QCE_HASH_SHA1_HMAC,
		.name = "hmac(sha1)",
		.drv_name = "hmac-sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct sha1_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256_HMAC,
		.name = "hmac(sha256)",
		.drv_name = "hmac-sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct sha256_state),
		.std_iv = std_iv_sha256,
	},
};
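
/*
 * Annotation, not in the original file: once these definitions are
 * registered, the instances appear in /proc/crypto under the generic
 * names above (e.g. "sha1") with driver names like "sha1-qce" and
 * priority 300, so the crypto core prefers them over lower-priority
 * software implementations when a caller requests the generic name.
 */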

static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;
	INIT_LIST_HEAD(&base->cra_list);

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}

static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}

const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};