author    Tom Lendacky <thomas.lendacky@amd.com>    2014-01-24 17:18:02 -0500
committer Herbert Xu <herbert@gondor.apana.org.au> 2014-02-08 20:59:23 -0500
commit    c11baa02c5d6ea06362fa61da070af34b7706c83 (patch)
tree      0075799191d2786575589ce71b02ee49fdc2a957 /drivers/crypto/ccp
parent    d81ed6534fd988a8a24fb607b459444d4b3d391a (diff)
crypto: ccp - Move HMAC calculation down to ccp ops file
Move the support to perform an HMAC calculation into the CCP operations
file. This eliminates the need to perform a synchronous SHA operation
used to calculate the HMAC.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
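For reference, HMAC per RFC 2104 is H((K XOR opad) || H((K XOR ipad) || m)). Before this patch the driver computed the outer hash from its completion callback via a synchronous ahash request (ccp_sync_hash()); with it, ccp_run_sha_cmd() re-issues a SHA command over opad || inner-digest instead. What follows is a minimal userspace sketch of the pad derivation that ccp_sha_setkey() performs -- the "^ 0x5c" opad loop is visible as diff context below, and 0x36 is the standard ipad counterpart. The block size, key, and main() harness here are illustrative only, not part of the patch.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64   /* SHA-1/SHA-224/SHA-256 block size */

/*
 * Derive the HMAC inner/outer pads from a key of at most one block,
 * per RFC 2104: ipad[i] = K[i] ^ 0x36, opad[i] = K[i] ^ 0x5c, with
 * the key zero-padded to the block size.  (A longer key must first
 * be hashed down to digest size, as ccp_sha_setkey() does.)
 */
static void hmac_derive_pads(const unsigned char *key, size_t key_len,
                             unsigned char *ipad, unsigned char *opad)
{
        size_t i;

        for (i = 0; i < BLOCK_SIZE; i++) {
                unsigned char k = (i < key_len) ? key[i] : 0;

                ipad[i] = k ^ 0x36;
                opad[i] = k ^ 0x5c;
        }
}

int main(void)
{
        unsigned char ipad[BLOCK_SIZE], opad[BLOCK_SIZE];

        hmac_derive_pads((const unsigned char *)"key", 3, ipad, opad);
        printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
        return 0;
}

The final HMAC value is then H(opad_block || inner_digest), which is exactly the block_size + digest_size buffer that the new code in ccp_run_sha_cmd() assembles in hmac_buf.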
Diffstat (limited to 'drivers/crypto/ccp')
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 130
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h     |   8
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c        | 104
3 files changed, 132 insertions(+), 110 deletions(-)
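Within the patch, ccp_sha_setkey() replaces the deleted asynchronous ccp_sync_hash() helper with a synchronous shash digest for hashing down over-long HMAC keys. Below is a self-contained sketch of that kernel shash pattern under the 3.x-era API, not code from the patch: the algorithm name "sha256" and the helper name hash_long_key are placeholders, and a kmalloc'd descriptor stands in for the on-stack variable-length descriptor the patch declares.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hash a too-long HMAC key down to digest size with a synchronous
 * shash transform -- the pattern the new ccp_sha_setkey() relies on. */
static int hash_long_key(const u8 *key, unsigned int key_len, u8 *out)
{
        struct crypto_shash *shash;
        struct shash_desc *desc;
        int ret;

        shash = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(shash))
                return PTR_ERR(shash);

        /* Descriptor size depends on the algorithm instance */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
                       GFP_KERNEL);
        if (!desc) {
                ret = -ENOMEM;
                goto out_free_shash;
        }
        desc->tfm = shash;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        /* One-shot init + update + final over a linear buffer */
        ret = crypto_shash_digest(desc, key, key_len, out);

        kfree(desc);
out_free_shash:
        crypto_free_shash(shash);
        return ret;
}

Because the shash call is synchronous, the digest is ready when crypto_shash_digest() returns; no completion callback or wait is needed, which is what lets the patch delete ccp_sync_hash() and its ccp_sha_result plumbing entirely.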
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 3867290b3531..873f23425245 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -24,75 +24,10 @@
 #include "ccp-crypto.h"
 
 
-struct ccp_sha_result {
-        struct completion completion;
-        int err;
-};
-
-static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
-{
-        struct ccp_sha_result *result = req->data;
-
-        if (err == -EINPROGRESS)
-                return;
-
-        result->err = err;
-        complete(&result->completion);
-}
-
-static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
-                         struct scatterlist *sg, unsigned int len)
-{
-        struct ccp_sha_result result;
-        struct ahash_request *req;
-        int ret;
-
-        init_completion(&result.completion);
-
-        req = ahash_request_alloc(tfm, GFP_KERNEL);
-        if (!req)
-                return -ENOMEM;
-
-        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                   ccp_sync_hash_complete, &result);
-        ahash_request_set_crypt(req, sg, buf, len);
-
-        ret = crypto_ahash_digest(req);
-        if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
-                ret = wait_for_completion_interruptible(&result.completion);
-                if (!ret)
-                        ret = result.err;
-        }
-
-        ahash_request_free(req);
-
-        return ret;
-}
-
-static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
-{
-        struct ahash_request *req = ahash_request_cast(async_req);
-        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
-        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-        struct scatterlist sg[2];
-        unsigned int block_size =
-                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-        unsigned int digest_size = crypto_ahash_digestsize(tfm);
-
-        sg_init_table(sg, ARRAY_SIZE(sg));
-        sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
-        sg_set_buf(&sg[1], rctx->ctx, digest_size);
-
-        return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
-                             block_size + digest_size);
-}
-
 static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
 {
         struct ahash_request *req = ahash_request_cast(async_req);
         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
         struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
         unsigned int digest_size = crypto_ahash_digestsize(tfm);
 
@@ -112,10 +47,6 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
         if (req->result)
                 memcpy(req->result, rctx->ctx, digest_size);
 
-        /* If we're doing an HMAC, we need to perform that on the final op */
-        if (rctx->final && ctx->u.sha.key_len)
-                ret = ccp_sha_finish_hmac(async_req);
-
 e_free:
         sg_free_table(&rctx->data_sg);
 
@@ -126,6 +57,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
                              unsigned int final)
 {
         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
         struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
         struct scatterlist *sg;
         unsigned int block_size =
@@ -196,6 +128,11 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
         rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
         rctx->cmd.u.sha.src = sg;
         rctx->cmd.u.sha.src_len = rctx->hash_cnt;
+        rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
+                &ctx->u.sha.opad_sg : NULL;
+        rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
+                ctx->u.sha.opad_count : 0;
+        rctx->cmd.u.sha.first = rctx->first;
         rctx->cmd.u.sha.final = rctx->final;
         rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
 
@@ -218,7 +155,6 @@ static int ccp_sha_init(struct ahash_request *req)
 
         memset(rctx, 0, sizeof(*rctx));
 
-        memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
         rctx->type = alg->type;
         rctx->first = 1;
 
@@ -261,10 +197,13 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                             unsigned int key_len)
 {
         struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-        struct scatterlist sg;
-        unsigned int block_size =
-                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-        unsigned int digest_size = crypto_ahash_digestsize(tfm);
+        struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
+        struct {
+                struct shash_desc sdesc;
+                char ctx[crypto_shash_descsize(shash)];
+        } desc;
+        unsigned int block_size = crypto_shash_blocksize(shash);
+        unsigned int digest_size = crypto_shash_digestsize(shash);
         int i, ret;
 
         /* Set to zero until complete */
@@ -277,8 +216,12 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
 
         if (key_len > block_size) {
                 /* Must hash the input key */
-                sg_init_one(&sg, key, key_len);
-                ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
+                desc.sdesc.tfm = shash;
+                desc.sdesc.flags = crypto_ahash_get_flags(tfm) &
+                        CRYPTO_TFM_REQ_MAY_SLEEP;
+
+                ret = crypto_shash_digest(&desc.sdesc, key, key_len,
+                                          ctx->u.sha.key);
                 if (ret) {
                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                         return -EINVAL;
@@ -293,6 +236,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                 ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
         }
 
+        sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
+        ctx->u.sha.opad_count = block_size;
+
         ctx->u.sha.key_len = key_len;
 
         return 0;
@@ -319,10 +265,9 @@ static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
 {
         struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
         struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
-        struct crypto_ahash *hmac_tfm;
+        struct crypto_shash *hmac_tfm;
 
-        hmac_tfm = crypto_alloc_ahash(alg->child_alg,
-                                      CRYPTO_ALG_TYPE_AHASH, 0);
+        hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
         if (IS_ERR(hmac_tfm)) {
                 pr_warn("could not load driver %s need for HMAC support\n",
                         alg->child_alg);
@@ -339,35 +284,14 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
         struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
 
         if (ctx->u.sha.hmac_tfm)
-                crypto_free_ahash(ctx->u.sha.hmac_tfm);
+                crypto_free_shash(ctx->u.sha.hmac_tfm);
 
         ccp_sha_cra_exit(tfm);
 }
 
-static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
-        cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
-        cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
-        cpu_to_be32(SHA1_H4), 0, 0, 0,
-};
-
-static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
-        cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
-        cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
-        cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
-        cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
-};
-
-static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
-        cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
-        cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
-        cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
-        cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
-};
-
 struct ccp_sha_def {
         const char *name;
         const char *drv_name;
-        const __be32 *init;
         enum ccp_sha_type type;
         u32 digest_size;
         u32 block_size;
@@ -377,7 +301,6 @@ static struct ccp_sha_def sha_algs[] = {
         {
                 .name = "sha1",
                 .drv_name = "sha1-ccp",
-                .init = sha1_init,
                 .type = CCP_SHA_TYPE_1,
                 .digest_size = SHA1_DIGEST_SIZE,
                 .block_size = SHA1_BLOCK_SIZE,
@@ -385,7 +308,6 @@ static struct ccp_sha_def sha_algs[] = {
         {
                 .name = "sha224",
                 .drv_name = "sha224-ccp",
-                .init = sha224_init,
                 .type = CCP_SHA_TYPE_224,
                 .digest_size = SHA224_DIGEST_SIZE,
                 .block_size = SHA224_BLOCK_SIZE,
@@ -393,7 +315,6 @@ static struct ccp_sha_def sha_algs[] = {
         {
                 .name = "sha256",
                 .drv_name = "sha256-ccp",
-                .init = sha256_init,
                 .type = CCP_SHA_TYPE_256,
                 .digest_size = SHA256_DIGEST_SIZE,
                 .block_size = SHA256_BLOCK_SIZE,
@@ -460,7 +381,6 @@ static int ccp_register_sha_alg(struct list_head *head,
 
         INIT_LIST_HEAD(&ccp_alg->entry);
 
-        ccp_alg->init = def->init;
         ccp_alg->type = def->type;
 
         alg = &ccp_alg->alg;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index b222231b6169..9aa4ae184f7f 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -137,11 +137,14 @@ struct ccp_aes_cmac_req_ctx {
 #define MAX_SHA_BLOCK_SIZE  SHA256_BLOCK_SIZE
 
 struct ccp_sha_ctx {
+        struct scatterlist opad_sg;
+        unsigned int opad_count;
+
         unsigned int key_len;
         u8 key[MAX_SHA_BLOCK_SIZE];
         u8 ipad[MAX_SHA_BLOCK_SIZE];
         u8 opad[MAX_SHA_BLOCK_SIZE];
-        struct crypto_ahash *hmac_tfm;
+        struct crypto_shash *hmac_tfm;
 };
 
 struct ccp_sha_req_ctx {
@@ -167,9 +170,6 @@ struct ccp_sha_req_ctx {
         unsigned int buf_count;
         u8 buf[MAX_SHA_BLOCK_SIZE];
 
-        /* HMAC support field */
-        struct scatterlist pad_sg;
-
         /* CCP driver command */
         struct ccp_cmd cmd;
 };
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c266a7b154bb..9ae006d69df4 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -23,6 +23,7 @@
 #include <linux/ccp.h>
 #include <linux/scatterlist.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
 
 #include "ccp-dev.h"
 
@@ -132,6 +133,27 @@ struct ccp_op {
         } u;
 };
 
+/* SHA initial context values */
+static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+        cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+        cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+        cpu_to_be32(SHA1_H4), 0, 0, 0,
+};
+
+static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+        cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+        cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+        cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+        cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+        cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+        cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+        cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+        cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
 /* The CCP cannot perform zero-length sha operations so the caller
  * is required to buffer data for the final operation. However, a
  * sha operation for a message with a total length of zero is valid
@@ -1411,7 +1433,27 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
         if (ret)
                 return ret;
 
-        ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+        if (sha->first) {
+                const __be32 *init;
+
+                switch (sha->type) {
+                case CCP_SHA_TYPE_1:
+                        init = ccp_sha1_init;
+                        break;
+                case CCP_SHA_TYPE_224:
+                        init = ccp_sha224_init;
+                        break;
+                case CCP_SHA_TYPE_256:
+                        init = ccp_sha256_init;
+                        break;
+                default:
+                        ret = -EINVAL;
+                        goto e_ctx;
+                }
+                memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
+        } else
+                ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+
         ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
         if (ret) {
@@ -1451,6 +1493,66 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
         ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
 
+        if (sha->final && sha->opad) {
+                /* HMAC operation, recursively perform final SHA */
+                struct ccp_cmd hmac_cmd;
+                struct scatterlist sg;
+                u64 block_size, digest_size;
+                u8 *hmac_buf;
+
+                switch (sha->type) {
+                case CCP_SHA_TYPE_1:
+                        block_size = SHA1_BLOCK_SIZE;
+                        digest_size = SHA1_DIGEST_SIZE;
+                        break;
+                case CCP_SHA_TYPE_224:
+                        block_size = SHA224_BLOCK_SIZE;
+                        digest_size = SHA224_DIGEST_SIZE;
+                        break;
+                case CCP_SHA_TYPE_256:
+                        block_size = SHA256_BLOCK_SIZE;
+                        digest_size = SHA256_DIGEST_SIZE;
+                        break;
+                default:
+                        ret = -EINVAL;
+                        goto e_data;
+                }
+
+                if (sha->opad_len != block_size) {
+                        ret = -EINVAL;
+                        goto e_data;
+                }
+
+                hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
+                if (!hmac_buf) {
+                        ret = -ENOMEM;
+                        goto e_data;
+                }
+                sg_init_one(&sg, hmac_buf, block_size + digest_size);
+
+                scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
+                memcpy(hmac_buf + block_size, ctx.address, digest_size);
+
+                memset(&hmac_cmd, 0, sizeof(hmac_cmd));
+                hmac_cmd.engine = CCP_ENGINE_SHA;
+                hmac_cmd.u.sha.type = sha->type;
+                hmac_cmd.u.sha.ctx = sha->ctx;
+                hmac_cmd.u.sha.ctx_len = sha->ctx_len;
+                hmac_cmd.u.sha.src = &sg;
+                hmac_cmd.u.sha.src_len = block_size + digest_size;
+                hmac_cmd.u.sha.opad = NULL;
+                hmac_cmd.u.sha.opad_len = 0;
+                hmac_cmd.u.sha.first = 1;
+                hmac_cmd.u.sha.final = 1;
+                hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;
+
+                ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
+                if (ret)
+                        cmd->engine_error = hmac_cmd.engine_error;
+
+                kfree(hmac_buf);
+        }
+
 e_data:
         ccp_free_data(&src, cmd_q);
 