author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-08 06:44:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-08 06:44:48 -0400
commit		87d7bcee4f5973a593b0d50134364cfe5652ff33 (patch)
tree		677125896b64de2f5acfa204955442f58e74cfa9 /drivers/crypto
parent		0223f9aaef94a09ffc0b6abcba732e62a483b88c (diff)
parent		be34c4ef693ff5c10f55606dbd656ddf0b4a8340 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 - add multibuffer infrastructure (single_task_running scheduler helper,
   OKed by Peter on lkml)
 - add SHA1 multibuffer implementation for AVX2
 - reenable "by8" AVX CTR optimisation after fixing counter overflow
 - add APM X-Gene SoC RNG support
 - SHA256/SHA512 now handle unaligned input correctly
 - set lz4 decompressed length correctly
 - fix algif socket buffer allocation failure for 64K page machines
 - misc fixes

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (47 commits)
  crypto: sha - Handle unaligned input data in generic sha256 and sha512.
  Revert "crypto: aesni - disable "by8" AVX CTR optimization"
  crypto: aesni - remove unused defines in "by8" variant
  crypto: aesni - fix counter overflow handling in "by8" variant
  hwrng: printk replacement
  crypto: qat - Removed unneeded partial state
  crypto: qat - Fix typo in name of tasklet_struct
  crypto: caam - Dynamic allocation of addresses for various memory blocks in CAAM.
  crypto: mcryptd - Fix typos in CRYPTO_MCRYPTD description
  crypto: algif - avoid excessive use of socket buffer in skcipher
  arm64: dts: add random number generator dts node to APM X-Gene platform.
  Documentation: rng: Add X-Gene SoC RNG driver documentation
  hwrng: xgene - add support for APM X-Gene SoC RNG support
  crypto: mv_cesa - Add missing #define
  crypto: testmgr - add test for lz4 and lz4hc
  crypto: lz4,lz4hc - fix decompression
  crypto: qat - Use pci_enable_msix_exact() instead of pci_enable_msix()
  crypto: drbg - fix maximum value checks on 32 bit systems
  crypto: drbg - fix sparse warning for cpu_to_be[32|64]
  crypto: sha-mb - sha1_mb_alg_state can be static
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/caam/caamhash.c				|  28
-rw-r--r--	drivers/crypto/caam/ctrl.c				| 138
-rw-r--r--	drivers/crypto/caam/intern.h				|   9
-rw-r--r--	drivers/crypto/caam/regs.h				|  51
-rw-r--r--	drivers/crypto/caam/sg_sw_sec4.h			|  54
-rw-r--r--	drivers/crypto/mv_cesa.h				|   1
-rw-r--r--	drivers/crypto/qat/qat_common/adf_ctl_drv.c		|   2
-rw-r--r--	drivers/crypto/qat/qat_common/adf_transport_internal.h	|   2
-rw-r--r--	drivers/crypto/qat/qat_common/qat_algs.c		|  66
-rw-r--r--	drivers/crypto/qat/qat_dh895xcc/adf_isr.c		|  14
10 files changed, 148 insertions, 217 deletions
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index b464d03ebf40..f347ab7eea95 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -836,8 +836,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 					   edesc->sec4_sg + sec4_sg_src_index,
 					   chained);
 		if (*next_buflen) {
-			sg_copy_part(next_buf, req->src, to_hash -
-				     *buflen, req->nbytes);
+			scatterwalk_map_and_copy(next_buf, req->src,
+						 to_hash - *buflen,
+						 *next_buflen, 0);
 			state->current_buf = !state->current_buf;
 		}
 	} else {
@@ -878,7 +879,8 @@ static int ahash_update_ctx(struct ahash_request *req)
 			kfree(edesc);
 		}
 	} else if (*next_buflen) {
-		sg_copy(buf + *buflen, req->src, req->nbytes);
+		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+					 req->nbytes, 0);
 		*buflen = *next_buflen;
 		*next_buflen = last_buflen;
 	}
@@ -1262,8 +1264,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		src_map_to_sec4_sg(jrdev, req->src, src_nents,
 				   edesc->sec4_sg + 1, chained);
 		if (*next_buflen) {
-			sg_copy_part(next_buf, req->src, to_hash - *buflen,
-				     req->nbytes);
+			scatterwalk_map_and_copy(next_buf, req->src,
+						 to_hash - *buflen,
+						 *next_buflen, 0);
 			state->current_buf = !state->current_buf;
 		}
 
@@ -1304,7 +1307,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 			kfree(edesc);
 		}
 	} else if (*next_buflen) {
-		sg_copy(buf + *buflen, req->src, req->nbytes);
+		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+					 req->nbytes, 0);
 		*buflen = *next_buflen;
 		*next_buflen = 0;
 	}
@@ -1413,9 +1417,9 @@ static int ahash_update_first(struct ahash_request *req)
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	u8 *next_buf = state->buf_0 + state->current_buf *
-		       CAAM_MAX_HASH_BLOCK_SIZE;
-	int *next_buflen = &state->buflen_0 + state->current_buf;
+	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
+	int *next_buflen = state->current_buf ?
+		&state->buflen_1 : &state->buflen_0;
 	int to_hash;
 	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
 	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
@@ -1476,7 +1480,8 @@ static int ahash_update_first(struct ahash_request *req)
 	}
 
 	if (*next_buflen)
-		sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
+		scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+					 *next_buflen, 0);
 
 	sh_len = desc_len(sh_desc);
 	desc = edesc->hw_desc;
@@ -1511,7 +1516,8 @@ static int ahash_update_first(struct ahash_request *req)
 		state->update = ahash_update_no_ctx;
 		state->finup = ahash_finup_no_ctx;
 		state->final = ahash_final_no_ctx;
-		sg_copy(next_buf, req->src, req->nbytes);
+		scatterwalk_map_and_copy(next_buf, req->src, 0,
+					 req->nbytes, 0);
 	}
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
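
Every conversion above replaces a driver-private scatterlist copy helper with scatterwalk_map_and_copy() from the crypto core. A minimal sketch of the call semantics (illustrative, using the surrounding code's buffer names):

	#include <crypto/scatterwalk.h>

	/*
	 * Copy nbytes between a flat buffer and a scatterlist, starting at
	 * byte offset start within the scatterlist; the final argument is
	 * the direction (0: sg -> buf, 1: buf -> sg).
	 */
	scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen,
				 *next_buflen, 0);

Note the fourth argument: the removed sg_copy_part() took an end position (req->nbytes here), while the replacement takes the byte count directly (*next_buflen), which also makes the bound on next_buf explicit.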
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 3cade79ea41e..31000c8c4a90 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,5 +1,4 @@
-/*
- * CAAM control-plane driver backend
+/* * CAAM control-plane driver backend
  * Controller-level driver, kernel property detection, initialization
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
@@ -81,38 +80,37 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 				       u32 *status)
 {
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-	struct caam_full __iomem *topregs;
+	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+	struct caam_deco __iomem *deco = ctrlpriv->deco;
 	unsigned int timeout = 100000;
 	u32 deco_dbg_reg, flags;
 	int i;
 
-	/* Set the bit to request direct access to DECO0 */
-	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
 
 	if (ctrlpriv->virt_en == 1) {
-		setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+		setbits32(&ctrl->deco_rsr, DECORSR_JR0);
 
-		while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
 		       --timeout)
 			cpu_relax();
 
 		timeout = 100000;
 	}
 
-	setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+	setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
 
-	while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
+	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
 	       --timeout)
 		cpu_relax();
 
 	if (!timeout) {
 		dev_err(ctrldev, "failed to acquire DECO 0\n");
-		clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+		clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
 		return -ENODEV;
 	}
 
 	for (i = 0; i < desc_len(desc); i++)
-		wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
+		wr_reg32(&deco->descbuf[i], *(desc + i));
 
 	flags = DECO_JQCR_WHL;
 	/*
@@ -123,11 +121,11 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 		flags |= DECO_JQCR_FOUR;
 
 	/* Instruct the DECO to execute it */
-	wr_reg32(&topregs->deco.jr_ctl_hi, flags);
+	wr_reg32(&deco->jr_ctl_hi, flags);
 
 	timeout = 10000000;
 	do {
-		deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
 		/*
 		 * If an error occured in the descriptor, then
 		 * the DECO status field will be set to 0x0D
@@ -138,14 +136,14 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 			cpu_relax();
 	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
 
-	*status = rd_reg32(&topregs->deco.op_status_hi) &
+	*status = rd_reg32(&deco->op_status_hi) &
 		  DECO_OP_STATUS_HI_ERR_MASK;
 
 	if (ctrlpriv->virt_en == 1)
-		clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+		clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
 
 	/* Mark the DECO as free */
-	clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+	clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
 
 	if (!timeout)
 		return -EAGAIN;
@@ -176,13 +174,13 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
 			   int gen_sk)
 {
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-	struct caam_full __iomem *topregs;
+	struct caam_ctrl __iomem *ctrl;
 	struct rng4tst __iomem *r4tst;
 	u32 *desc, status, rdsta_val;
 	int ret = 0, sh_idx;
 
-	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
-	r4tst = &topregs->ctrl.r4tst[0];
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+	r4tst = &ctrl->r4tst[0];
 
 	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
 	if (!desc)
@@ -212,12 +210,11 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
 		 * CAAM eras), then try again.
 		 */
 		rdsta_val =
-			rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+			rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
 		if (status || !(rdsta_val & (1 << sh_idx)))
 			ret = -EAGAIN;
 		if (ret)
 			break;
-
 		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
 		/* Clear the contents before recreating the descriptor */
 		memset(desc, 0x00, CAAM_CMD_SZ * 7);
@@ -285,12 +282,12 @@ static int caam_remove(struct platform_device *pdev)
 {
 	struct device *ctrldev;
 	struct caam_drv_private *ctrlpriv;
-	struct caam_full __iomem *topregs;
+	struct caam_ctrl __iomem *ctrl;
 	int ring, ret = 0;
 
 	ctrldev = &pdev->dev;
 	ctrlpriv = dev_get_drvdata(ctrldev);
-	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
 
 	/* Remove platform devices for JobRs */
 	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
@@ -308,7 +305,7 @@ static int caam_remove(struct platform_device *pdev)
 #endif
 
 	/* Unmap controller region */
-	iounmap(&topregs->ctrl);
+	iounmap(&ctrl);
 
 	return ret;
 }
@@ -323,12 +320,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 {
 	struct device *ctrldev = &pdev->dev;
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-	struct caam_full __iomem *topregs;
+	struct caam_ctrl __iomem *ctrl;
 	struct rng4tst __iomem *r4tst;
 	u32 val;
 
-	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
-	r4tst = &topregs->ctrl.r4tst[0];
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+	r4tst = &ctrl->r4tst[0];
 
 	/* put RNG4 into program mode */
 	setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
@@ -355,10 +352,19 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 	wr_reg32(&r4tst->rtsdctl, val);
 	/* min. freq. count, equal to 1/4 of the entropy sample length */
 	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
-	/* max. freq. count, equal to 8 times the entropy sample length */
-	wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
+	/* disable maximum frequency count */
+	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+	/* read the control register */
+	val = rd_reg32(&r4tst->rtmctl);
+	/*
+	 * select raw sampling in both entropy shifter
+	 * and statistical checker
+	 */
+	setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
 	/* put RNG4 into run mode */
-	clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+	clrbits32(&val, RTMCTL_PRGM);
+	/* write back the control register */
+	wr_reg32(&r4tst->rtmctl, val);
 }
 
 /**
@@ -387,13 +393,14 @@ static int caam_probe(struct platform_device *pdev)
 	struct device *dev;
 	struct device_node *nprop, *np;
 	struct caam_ctrl __iomem *ctrl;
-	struct caam_full __iomem *topregs;
 	struct caam_drv_private *ctrlpriv;
 #ifdef CONFIG_DEBUG_FS
 	struct caam_perfmon *perfmon;
 #endif
 	u32 scfgr, comp_params;
 	u32 cha_vid_ls;
+	int pg_size;
+	int BLOCK_OFFSET = 0;
 
 	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
 				GFP_KERNEL);
@@ -412,10 +419,27 @@ static int caam_probe(struct platform_device *pdev)
 		dev_err(dev, "caam: of_iomap() failed\n");
 		return -ENOMEM;
 	}
-	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+	/* Finding the page size for using the CTPR_MS register */
+	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
 
-	/* topregs used to derive pointers to CAAM sub-blocks only */
-	topregs = (struct caam_full __iomem *)ctrl;
+	/* Allocating the BLOCK_OFFSET based on the supported page size on
+	 * the platform
+	 */
+	if (pg_size == 0)
+		BLOCK_OFFSET = PG_SIZE_4K;
+	else
+		BLOCK_OFFSET = PG_SIZE_64K;
+
+	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+	ctrlpriv->assure = (struct caam_assurance __force *)
+			   ((uint8_t *)ctrl +
+			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+			   );
+	ctrlpriv->deco = (struct caam_deco __force *)
+			 ((uint8_t *)ctrl +
+			  BLOCK_OFFSET * DECO_BLOCK_NUMBER
+			 );
 
 	/* Get the IRQ of the controller (for security violations only) */
 	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
@@ -424,15 +448,14 @@ static int caam_probe(struct platform_device *pdev)
 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
 	 * long pointers in master configuration register
 	 */
-	setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
+	setbits32(&ctrl->mcr, MCFGR_WDENABLE |
 		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
 	/*
 	 * Read the Compile Time paramters and SCFGR to determine
 	 * if Virtualization is enabled for this platform
 	 */
-	comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
-	scfgr = rd_reg32(&topregs->ctrl.scfgr);
+	scfgr = rd_reg32(&ctrl->scfgr);
 
 	ctrlpriv->virt_en = 0;
 	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
@@ -450,7 +473,7 @@ static int caam_probe(struct platform_device *pdev)
 	}
 
 	if (ctrlpriv->virt_en == 1)
-		setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+		setbits32(&ctrl->jrstart, JRSTART_JR0_START |
 			  JRSTART_JR1_START | JRSTART_JR2_START |
 			  JRSTART_JR3_START);
 
@@ -477,7 +500,7 @@ static int caam_probe(struct platform_device *pdev)
 			sizeof(struct platform_device *) * rspec,
 			GFP_KERNEL);
 	if (ctrlpriv->jrpdev == NULL) {
-		iounmap(&topregs->ctrl);
+		iounmap(&ctrl);
 		return -ENOMEM;
 	}
 
@@ -493,18 +516,26 @@ static int caam_probe(struct platform_device *pdev)
 				ring);
 			continue;
 		}
+		ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
+				     ((uint8_t *)ctrl +
+				      (ring + JR_BLOCK_NUMBER) *
+				      BLOCK_OFFSET
+				     );
 		ctrlpriv->total_jobrs++;
 		ring++;
 	}
 
 	/* Check to see if QI present. If so, enable */
 	ctrlpriv->qi_present =
-			!!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+			!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
 			   CTPR_MS_QI_MASK);
 	if (ctrlpriv->qi_present) {
-		ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
+		ctrlpriv->qi = (struct caam_queue_if __force *)
+			       ((uint8_t *)ctrl +
+				BLOCK_OFFSET * QI_BLOCK_NUMBER
+			       );
 		/* This is all that's required to physically enable QI */
-		wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
+		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
 	}
 
 	/* If no QI and no rings specified, quit and go home */
@@ -514,7 +545,7 @@ static int caam_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
+	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
 
 	/*
 	 * If SEC has RNG version >= 4 and RNG state handle has not been
@@ -522,7 +553,7 @@ static int caam_probe(struct platform_device *pdev)
 	 */
 	if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
 		ctrlpriv->rng4_sh_init =
-			rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+			rd_reg32(&ctrl->r4tst[0].rdsta);
 		/*
 		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
 		 * generated, signal this to the function that is instantiating
@@ -533,7 +564,7 @@ static int caam_probe(struct platform_device *pdev)
 		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
 		do {
 			int inst_handles =
-				rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+				rd_reg32(&ctrl->r4tst[0].rdsta) &
 						RDSTA_IFMASK;
 			/*
 			 * If either SH were instantiated by somebody else
@@ -544,6 +575,9 @@ static int caam_probe(struct platform_device *pdev)
 			 * the TRNG parameters.
 			 */
 			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+				dev_info(dev,
+					 "Entropy delay = %u\n",
+					 ent_delay);
 				kick_trng(pdev, ent_delay);
 				ent_delay += 400;
 			}
@@ -556,6 +590,12 @@ static int caam_probe(struct platform_device *pdev)
 			 */
 			ret = instantiate_rng(dev, inst_handles,
 					      gen_sk);
+			if (ret == -EAGAIN)
+				/*
+				 * if here, the loop will rerun,
+				 * so don't hog the CPU
+				 */
+				cpu_relax();
 		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
 		if (ret) {
 			dev_err(dev, "failed to instantiate RNG");
@@ -569,13 +609,13 @@ static int caam_probe(struct platform_device *pdev)
 	ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
 
 		/* Enable RDB bit so that RNG works faster */
-		setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
+		setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
 	}
 
 	/* NOTE: RTIC detection ought to go here, around Si time */
 
-	caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
-		  (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
+	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
+		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
 
 	/* Report "alive" for developer to see */
 	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
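
With struct caam_full gone, caam_probe() now derives each sub-block pointer from the controller base plus a block index scaled by the page size advertised in CTPR_MS. A sketch of the arithmetic (block_addr() is a hypothetical helper; the driver open-codes the same multiplication when filling ctrlpriv->jr[], ->assure, ->qi and ->deco):

	/*
	 * Block n sits at base + n * BLOCK_OFFSET, where BLOCK_OFFSET is 4K
	 * or 64K depending on CTPR_MS[PG_SZ].  With 4K pages this reproduces
	 * the old fixed map: JR0 (block 1) at base + 0x1000, assurance (6)
	 * at + 0x6000, QI (7) at + 0x7000, DECO 0 (8) at + 0x8000.
	 */
	static void __iomem *block_addr(void __iomem *base, int block,
					int pg_size)
	{
		return (u8 __iomem *)base +
		       block * (pg_size ? PG_SIZE_64K : PG_SIZE_4K);
	}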
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 97363db4e56e..89b94cc9e7a2 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -70,10 +70,11 @@ struct caam_drv_private {
 	struct platform_device *pdev;
 
 	/* Physical-presence section */
-	struct caam_ctrl *ctrl; /* controller region */
-	struct caam_deco **deco; /* DECO/CCB views */
-	struct caam_assurance *ac;
-	struct caam_queue_if *qi; /* QI control region */
+	struct caam_ctrl __iomem *ctrl; /* controller region */
+	struct caam_deco __iomem *deco; /* DECO/CCB views */
+	struct caam_assurance __iomem *assure;
+	struct caam_queue_if __iomem *qi; /* QI control region */
+	struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
 
 	/*
 	 * Detected geometry block. Filled in from device tree if powerpc,
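
The __iomem annotations are not cosmetic: sparse tags such pointers noderef, so any access that bypasses the MMIO accessors is flagged at build time. A hedged illustration of the class of mistake this catches (hypothetical lines, not from the driver):

	u32 v;

	v = ctrlpriv->ctrl->scfgr;		/* sparse: dereference of noderef (__iomem) pointer */
	v = rd_reg32(&ctrlpriv->ctrl->scfgr);	/* ok: access goes through the accessor */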
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index f48e344ffc39..378ddc17f60e 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -194,6 +194,8 @@ struct caam_perfmon {
 #define CTPR_MS_QI_MASK		(0x1ull << CTPR_MS_QI_SHIFT)
 #define CTPR_MS_VIRT_EN_INCL	0x00000001
 #define CTPR_MS_VIRT_EN_POR	0x00000002
+#define CTPR_MS_PG_SZ_MASK	0x10
+#define CTPR_MS_PG_SZ_SHIFT	4
 	u32 comp_parms_ms;	/* CTPR - Compile Parameters Register */
 	u32 comp_parms_ls;	/* CTPR - Compile Parameters Register */
 	u64 rsvd1[2];
@@ -269,6 +271,16 @@ struct rngtst {
 /* RNG4 TRNG test registers */
 struct rng4tst {
 #define RTMCTL_PRGM	0x00010000	/* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC	0 /* use von Neumann data in
+						     both entropy shifter and
+						     statistical checker */
+#define RTMCTL_SAMP_MODE_RAW_ES_SC		1 /* use raw data in both
+						     entropy shifter and
+						     statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC	2 /* use von Neumann data in
+						     entropy shifter, raw data
+						     in statistical checker */
+#define RTMCTL_SAMP_MODE_INVALID		3 /* invalid combination */
 	u32 rtmctl;		/* misc. control register */
 	u32 rtscmisc;		/* statistical check misc. register */
 	u32 rtpkrrng;		/* poker range register */
@@ -278,7 +290,7 @@ struct rng4tst {
 	};
 #define RTSDCTL_ENT_DLY_SHIFT 16
 #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
-#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MIN 3200
 #define RTSDCTL_ENT_DLY_MAX 12800
 	u32 rtsdctl;		/* seed control register */
 	union {
@@ -286,6 +298,7 @@ struct rng4tst {
 		u32 rttotsam;	/* PRGM=0: total samples register */
 	};
 	u32 rtfrqmin;		/* frequency count min. limit register */
+#define RTFRQMAX_DISABLE	(1 << 20)
 	union {
 		u32 rtfrqmax;	/* PRGM=1: freq. count max. limit register */
 		u32 rtfrqcnt;	/* PRGM=0: freq. count register */
@@ -758,34 +771,10 @@ struct caam_deco {
 #define DECO_JQCR_WHL		0x20000000
 #define DECO_JQCR_FOUR		0x10000000
 
-/*
- * Current top-level view of memory map is:
- *
- * 0x0000 - 0x0fff - CAAM Top-Level Control
- * 0x1000 - 0x1fff - Job Ring 0
- * 0x2000 - 0x2fff - Job Ring 1
- * 0x3000 - 0x3fff - Job Ring 2
- * 0x4000 - 0x4fff - Job Ring 3
- * 0x5000 - 0x5fff - (unused)
- * 0x6000 - 0x6fff - Assurance Controller
- * 0x7000 - 0x7fff - Queue Interface
- * 0x8000 - 0x8fff - DECO-CCB 0
- * 0x9000 - 0x9fff - DECO-CCB 1
- * 0xa000 - 0xafff - DECO-CCB 2
- * 0xb000 - 0xbfff - DECO-CCB 3
- * 0xc000 - 0xcfff - DECO-CCB 4
- *
- * caam_full describes the full register view of CAAM if useful,
- * although many configurations may choose to implement parts of
- * the register map separately, in differing privilege regions
- */
-struct caam_full {
-	struct caam_ctrl __iomem ctrl;
-	struct caam_job_ring jr[4];
-	u64 rsvd[512];
-	struct caam_assurance assure;
-	struct caam_queue_if qi;
-	struct caam_deco deco;
-};
-
+#define JR_BLOCK_NUMBER		1
+#define ASSURE_BLOCK_NUMBER	6
+#define QI_BLOCK_NUMBER		7
+#define DECO_BLOCK_NUMBER	8
+#define PG_SIZE_4K		0x1000
+#define PG_SIZE_64K		0x10000
 #endif /* REGS_H */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index b12ff85f4241..ce28a563effc 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -116,57 +116,3 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
 	}
 	return nents;
 }
-
-/* Map SG page in kernel virtual address space and copy */
-static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
-			       int len, int offset)
-{
-	u8 *mapped_addr;
-
-	/*
-	 * Page here can be user-space pinned using get_user_pages
-	 * Same must be kmapped before use and kunmapped subsequently
-	 */
-	mapped_addr = kmap_atomic(sg_page(sg));
-	memcpy(dest, mapped_addr + offset, len);
-	kunmap_atomic(mapped_addr);
-}
-
-/* Copy from len bytes of sg to dest, starting from beginning */
-static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
-{
-	struct scatterlist *current_sg = sg;
-	int cpy_index = 0, next_cpy_index = current_sg->length;
-
-	while (next_cpy_index < len) {
-		sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
-			    current_sg->offset);
-		current_sg = scatterwalk_sg_next(current_sg);
-		cpy_index = next_cpy_index;
-		next_cpy_index += current_sg->length;
-	}
-	if (cpy_index < len)
-		sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
-			    current_sg->offset);
-}
-
-/* Copy sg data, from to_skip to end, to dest */
-static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
-				int to_skip, unsigned int end)
-{
-	struct scatterlist *current_sg = sg;
-	int sg_index, cpy_index, offset;
-
-	sg_index = current_sg->length;
-	while (sg_index <= to_skip) {
-		current_sg = scatterwalk_sg_next(current_sg);
-		sg_index += current_sg->length;
-	}
-	cpy_index = sg_index - to_skip;
-	offset = current_sg->offset + current_sg->length - cpy_index;
-	sg_map_copy(dest, current_sg, cpy_index, offset);
-	if (end - sg_index) {
-		current_sg = scatterwalk_sg_next(current_sg);
-		sg_copy(dest + cpy_index, current_sg, end - sg_index);
-	}
-}
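
These helpers duplicated what the crypto core already provides, with the extra risk that sg_map_copy() kmapped only a single page and so could misread a highmem entry whose copy range crossed a page boundary. Assuming the standard scatterwalk API, the removed entry points map onto scatterwalk_map_and_copy() roughly as follows (a sketch; the real call sites in caamhash.c pass their own offsets):

	/* was: sg_copy(dest, sg, len) - copy the first len bytes */
	scatterwalk_map_and_copy(dest, sg, 0, len, 0);

	/* was: sg_copy_part(dest, sg, to_skip, end) - copy bytes [to_skip, end) */
	scatterwalk_map_and_copy(dest, sg, to_skip, end - to_skip, 0);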
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index 08fcb1116d90..9249d3ed184b 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,4 +1,5 @@
 #ifndef __MV_CRYPTO_H__
+#define __MV_CRYPTO_H__
 
 #define DIGEST_INITIAL_VAL_A	0xdd00
 #define DIGEST_INITIAL_VAL_B	0xdd04
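
The guard opened with #ifndef __MV_CRYPTO_H__ but never defined the symbol, so it protected nothing and including the header twice would redefine every macro below it. The added line completes the usual idiom:

	#ifndef __MV_CRYPTO_H__
	#define __MV_CRYPTO_H__

	/* ... register offsets and declarations ... */

	#endif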
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 6a92284a86b2..244d73378f0e 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -111,7 +111,7 @@ static int adf_chr_drv_create(void)
 	drv_device = device_create(adt_ctl_drv.drv_class, NULL,
 				   MKDEV(adt_ctl_drv.major, 0),
 				   NULL, DEVICE_NAME);
-	if (!drv_device) {
+	if (IS_ERR(drv_device)) {
 		pr_err("QAT: failed to create device\n");
 		goto err_cdev_del;
 	}
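
device_create() reports failure by returning ERR_PTR(-errno), never NULL, so the old `if (!drv_device)` test could never fire and a failed creation would go unnoticed. The corrected pattern, in outline (sketched with a generic class/devt rather than the driver's locals):

	struct device *dev;

	dev = device_create(class, NULL, devt, NULL, DEVICE_NAME);
	if (IS_ERR(dev))
		return PTR_ERR(dev);	/* e.g. -ENOMEM */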
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index f854bac276b0..c40546079981 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -75,7 +75,7 @@ struct adf_etr_ring_data {
 
 struct adf_etr_bank_data {
 	struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
-	struct tasklet_struct resp_hanlder;
+	struct tasklet_struct resp_handler;
 	void __iomem *csr_addr;
 	struct adf_accel_dev *accel_dev;
 	uint32_t irq_coalesc_timer;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59df48872955..3e26fa2b293f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -105,7 +105,7 @@ struct qat_alg_cd {
 #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
 
 struct qat_auth_state {
-	uint8_t data[MAX_AUTH_STATE_SIZE];
+	uint8_t data[MAX_AUTH_STATE_SIZE + 64];
 } __aligned(64);
 
 struct qat_alg_session_ctx {
@@ -113,10 +113,6 @@ struct qat_alg_session_ctx {
 	dma_addr_t enc_cd_paddr;
 	struct qat_alg_cd *dec_cd;
 	dma_addr_t dec_cd_paddr;
-	struct qat_auth_state *auth_hw_state_enc;
-	dma_addr_t auth_state_enc_paddr;
-	struct qat_auth_state *auth_hw_state_dec;
-	dma_addr_t auth_state_dec_paddr;
 	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
 	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
 	struct qat_crypto_instance *inst;
@@ -150,8 +146,9 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 				  struct qat_alg_session_ctx *ctx,
 				  const uint8_t *auth_key,
-				  unsigned int auth_keylen, uint8_t *auth_state)
+				  unsigned int auth_keylen)
 {
+	struct qat_auth_state auth_state;
 	struct {
 		struct shash_desc shash;
 		char ctx[crypto_shash_descsize(ctx->hash_tfm)];
@@ -161,12 +158,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	struct sha512_state sha512;
 	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
 	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
-	uint8_t *ipad = auth_state;
+	uint8_t *ipad = auth_state.data;
 	uint8_t *opad = ipad + block_size;
 	__be32 *hash_state_out;
 	__be64 *hash512_state_out;
 	int i, offset;
 
+	memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
 	desc.shash.tfm = ctx->hash_tfm;
 	desc.shash.flags = 0x0;
 
@@ -298,10 +296,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
 	void *ptr = &req_tmpl->cd_ctrl;
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(struct icp_qat_fw_la_auth_req_params *)
-		((char *)&req_tmpl->serv_specif_rqpars +
-		sizeof(struct icp_qat_fw_la_cipher_req_params));
 
 	/* CD setup */
 	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
@@ -312,8 +306,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
 	hash->sha.inner_setup.auth_counter.counter =
 		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
 
-	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
-				   (uint8_t *)ctx->auth_hw_state_enc))
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
 		return -EFAULT;
 
 	/* Request setup */
@@ -359,9 +352,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
 			((sizeof(struct icp_qat_hw_auth_setup) +
 			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
-	auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
-			sizeof(struct icp_qat_hw_auth_counter) +
-			round_up(hash_cd_ctrl->inner_state1_sz, 8);
 	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
 	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
 	return 0;
@@ -399,8 +389,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
 	hash->sha.inner_setup.auth_counter.counter =
 		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
 
-	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
-				   (uint8_t *)ctx->auth_hw_state_dec))
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
 		return -EFAULT;
 
 	/* Request setup */
@@ -450,9 +439,6 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
 			((sizeof(struct icp_qat_hw_auth_setup) +
 			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
-	auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
-			sizeof(struct icp_qat_hw_auth_counter) +
-			round_up(hash_cd_ctrl->inner_state1_sz, 8);
 	auth_param->auth_res_sz = digestsize;
 	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
 	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
@@ -512,10 +498,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
 	dev = &GET_DEV(ctx->inst->accel_dev);
 	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
 	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
-	memset(ctx->auth_hw_state_enc, 0,
-	       sizeof(struct qat_auth_state));
-	memset(ctx->auth_hw_state_dec, 0,
-	       sizeof(struct qat_auth_state));
 	memset(&ctx->enc_fw_req_tmpl, 0,
 	       sizeof(struct icp_qat_fw_la_bulk_req));
 	memset(&ctx->dec_fw_req_tmpl, 0,
@@ -548,22 +530,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
 			spin_unlock(&ctx->lock);
 			goto out_free_enc;
 		}
-		ctx->auth_hw_state_enc =
-			dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
-					    &ctx->auth_state_enc_paddr,
-					    GFP_ATOMIC);
-		if (!ctx->auth_hw_state_enc) {
-			spin_unlock(&ctx->lock);
-			goto out_free_dec;
-		}
-		ctx->auth_hw_state_dec =
-			dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
-					    &ctx->auth_state_dec_paddr,
-					    GFP_ATOMIC);
-		if (!ctx->auth_hw_state_dec) {
-			spin_unlock(&ctx->lock);
-			goto out_free_auth_enc;
-		}
 	}
 	spin_unlock(&ctx->lock);
 	if (qat_alg_init_sessions(ctx, key, keylen))
@@ -572,14 +538,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
 	return 0;
 
 out_free_all:
-	dma_free_coherent(dev, sizeof(struct qat_auth_state),
-			  ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
-	ctx->auth_hw_state_dec = NULL;
-out_free_auth_enc:
-	dma_free_coherent(dev, sizeof(struct qat_auth_state),
-			  ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
-	ctx->auth_hw_state_enc = NULL;
-out_free_dec:
 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 			  ctx->dec_cd, ctx->dec_cd_paddr);
 	ctx->dec_cd = NULL;
@@ -924,16 +882,6 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
 	if (ctx->dec_cd)
 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 				  ctx->dec_cd, ctx->dec_cd_paddr);
-	if (ctx->auth_hw_state_enc)
-		dma_free_coherent(dev, sizeof(struct qat_auth_state),
-				  ctx->auth_hw_state_enc,
-				  ctx->auth_state_enc_paddr);
-
-	if (ctx->auth_hw_state_dec)
-		dma_free_coherent(dev, sizeof(struct qat_auth_state),
-				  ctx->auth_hw_state_dec,
-				  ctx->auth_state_dec_paddr);
-
 	qat_crypto_put_instance(inst);
 }
 
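
With the DMA-coherent buffers gone, qat_alg_do_precomputes() keeps its HMAC scratch state on the stack: ipad occupies the first hash block of auth_state.data and opad the block after it. For reference, those buffers hold the standard HMAC pads of RFC 2104 (a sketch; ipad and opad are assumed to start out containing the auth key zero-padded to block_size bytes):

	int i;

	for (i = 0; i < block_size; i++) {
		ipad[i] ^= 0x36;	/* HMAC inner-pad constant */
		opad[i] ^= 0x5c;	/* HMAC outer-pad constant */
	}

Since the auth_partial_st_prefix assignments above are also removed, the firmware is never handed a pointer to this state, so a stack buffer suffices and the per-session dma_zalloc_coherent() allocations can be dropped.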
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d4172dedf775..67ec61e51185 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -70,9 +70,9 @@ static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 	for (i = 0; i < msix_num_entries; i++)
 		pci_dev_info->msix_entries.entries[i].entry = i;
 
-	if (pci_enable_msix(pci_dev_info->pci_dev,
-			    pci_dev_info->msix_entries.entries,
-			    msix_num_entries)) {
+	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
+				  pci_dev_info->msix_entries.entries,
+				  msix_num_entries)) {
 		pr_err("QAT: Failed to enable MSIX IRQ\n");
 		return -EFAULT;
 	}
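
The switch matters because pci_enable_msix() could return a positive value, meaning the request failed but that many vectors could still be allocated; treating every non-zero return as a hard error, as this caller does, was therefore not quite right. pci_enable_msix_exact() returns only 0 or a negative errno, so the simple truth test becomes a complete check. In outline:

	int ret;

	ret = pci_enable_msix_exact(pdev, entries, nvec);
	if (ret)		/* 0 on success, -errno on failure */
		return ret;	/* never a positive "retry with fewer" hint */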
@@ -89,7 +89,7 @@ static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
 	struct adf_etr_bank_data *bank = bank_ptr;
 
 	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
-	tasklet_hi_schedule(&bank->resp_hanlder);
+	tasklet_hi_schedule(&bank->resp_handler);
 	return IRQ_HANDLED;
 }
 
@@ -217,7 +217,7 @@ static int adf_setup_bh(struct adf_accel_dev *accel_dev)
 	int i;
 
 	for (i = 0; i < hw_data->num_banks; i++)
-		tasklet_init(&priv_data->banks[i].resp_hanlder,
+		tasklet_init(&priv_data->banks[i].resp_handler,
 			     adf_response_handler,
 			     (unsigned long)&priv_data->banks[i]);
 	return 0;
@@ -230,8 +230,8 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
 	int i;
 
 	for (i = 0; i < hw_data->num_banks; i++) {
-		tasklet_disable(&priv_data->banks[i].resp_hanlder);
-		tasklet_kill(&priv_data->banks[i].resp_hanlder);
+		tasklet_disable(&priv_data->banks[i].resp_handler);
+		tasklet_kill(&priv_data->banks[i].resp_handler);
 	}
 }