author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-07 22:44:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-07 22:44:40 -0400
commit		639b4ac691c6f6e48921dc576379c176f82f3250 (patch)
tree		6cf521ae7d46ca8dfa139ca67dd32545de8d2a75 /drivers/crypto
parent		9d2cd01b15d0782adb81e40094b67904d77b03df (diff)
parent		5208ed2ca16526cdbec25abe594a3cc3aea210f4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6 into next
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 3.16:

   - Added test vectors for SHA/AES-CCM/DES-CBC/3DES-CBC.
   - Fixed a number of error-path memory leaks in tcrypt.
   - Fixed error-path memory leak in caam.
   - Removed unnecessary global mutex from mxs-dcp.
   - Added ahash walk interface that can actually be asynchronous.
   - Cleaned up caam error reporting.
   - Allow crypto_user get operation to be used by non-root users.
   - Add support for SSS module on Exynos.
   - Misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6: (60 commits)
  crypto: testmgr - add aead cbc des, des3_ede tests
  crypto: testmgr - Fix DMA-API warning
  crypto: cesa - Don't use tfm->__crt_alg->cra_type directly
  crypto: sahara - Don't use tfm->__crt_alg->cra_name directly
  crypto: padlock - Don't use tfm->__crt_alg->cra_name directly
  crypto: n2 - Don't use tfm->__crt_alg->cra_name directly
  crypto: dcp - Don't use tfm->__crt_alg->cra_name directly
  crypto: cesa - Don't use tfm->__crt_alg->cra_name directly
  crypto: ccp - Don't use tfm->__crt_alg->cra_name directly
  crypto: geode - Don't use tfm->__crt_alg->cra_name directly
  crypto: geode - Weed out printk() from probe()
  crypto: geode - Consistently use AES_KEYSIZE_128
  crypto: geode - Kill AES_IV_LENGTH
  crypto: geode - Kill AES_MIN_BLOCK_SIZE
  crypto: mxs-dcp - Remove global mutex
  crypto: hash - Add real ahash walk interface
  hwrng: n2-drv - Introduce the use of the managed version of kzalloc
  crypto: caam - reinitialize keys_fit_inline for decrypt and givencrypt
  crypto: s5p-sss - fix multiplatform build
  hwrng: timeriomem - remove unnecessary OOM messages
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                   |   6
-rw-r--r--  drivers/crypto/atmel-aes.c               |   8
-rw-r--r--  drivers/crypto/bfin_crc.c                | 103
-rw-r--r--  drivers/crypto/bfin_crc.h                | 125
-rw-r--r--  drivers/crypto/caam/caamalg.c            |  31
-rw-r--r--  drivers/crypto/caam/caamhash.c           |  32
-rw-r--r--  drivers/crypto/caam/caamrng.c            |   7
-rw-r--r--  drivers/crypto/caam/error.c              | 393
-rw-r--r--  drivers/crypto/caam/error.h              |   2
-rw-r--r--  drivers/crypto/caam/key_gen.c            |   7
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c  |   4
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c             |   7
-rw-r--r--  drivers/crypto/geode-aes.c               |  28
-rw-r--r--  drivers/crypto/geode-aes.h               |   6
-rw-r--r--  drivers/crypto/mv_cesa.c                 |   6
-rw-r--r--  drivers/crypto/mxs-dcp.c                 |  52
-rw-r--r--  drivers/crypto/n2_core.c                 |   4
-rw-r--r--  drivers/crypto/nx/nx-842.c               |   4
-rw-r--r--  drivers/crypto/omap-des.c                |  33
-rw-r--r--  drivers/crypto/padlock-sha.c             |   2
-rw-r--r--  drivers/crypto/s5p-sss.c                 | 148
-rw-r--r--  drivers/crypto/sahara.c                  |   2
22 files changed, 582 insertions(+), 428 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 03ccdb0ccf9e..f066fa23cc05 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -301,14 +301,14 @@ config CRYPTO_DEV_SAHARA
 	  found in some Freescale i.MX chips.
 
 config CRYPTO_DEV_S5P
-	tristate "Support for Samsung S5PV210 crypto accelerator"
-	depends on ARCH_S5PV210
+	tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
+	depends on ARCH_S5PV210 || ARCH_EXYNOS
 	select CRYPTO_AES
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
 	help
 	  This option allows you to have support for S5P crypto acceleration.
-	  Select this to offload Samsung S5PV210 or S5PC110 from AES
+	  Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
 	  algorithms execution.
 
 config CRYPTO_DEV_NX
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index d7c9e317423c..a083474991ab 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -716,6 +716,12 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 			return -EINVAL;
 		}
 		ctx->block_size = CFB32_BLOCK_SIZE;
+	} else if (mode & AES_FLAGS_CFB64) {
+		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB64 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB64_BLOCK_SIZE;
 	} else {
 		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
 			pr_err("request size is not exact amount of AES blocks\n");
@@ -1069,7 +1075,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_driver_name	= "atmel-cfb8-aes",
 	.cra_priority		= 100,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= CFB64_BLOCK_SIZE,
+	.cra_blocksize		= CFB8_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_alignmask		= 0x0,
 	.cra_type		= &crypto_ablkcipher_type,
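The new CFB64 branch follows the same rule as the existing CFB32 and plain-AES branches: a request is rejected unless its length is a whole number of blocks. As a standalone sketch of that check, here is IS_ALIGNED() as defined in include/linux/kernel.h, applied to an assumed 8-byte CFB64 block size (the sample sizes are illustrative):

	#include <stdio.h>

	/* Power-of-two alignment test, as in include/linux/kernel.h. */
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	/* CFB64 works on 64-bit blocks; the value 8 is an assumption here. */
	#define CFB64_BLOCK_SIZE	8

	int main(void)
	{
		unsigned int nbytes[] = { 8, 16, 20, 64 };
		int i;

		for (i = 0; i < 4; i++)
			printf("%u bytes: %s\n", nbytes[i],
			       IS_ALIGNED(nbytes[i], CFB64_BLOCK_SIZE) ?
			       "ok" : "rejected (-EINVAL)");
		return 0;
	}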
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index c9ff298e6d26..b099e33cb073 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -29,10 +29,11 @@
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 
-#include <asm/blackfin.h>
-#include <asm/bfin_crc.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
+#include <asm/io.h>
+
+#include "bfin_crc.h"
 
 #define CRC_CCRYPTO_QUEUE_LENGTH	5
 
@@ -54,12 +55,13 @@ struct bfin_crypto_crc {
 	int			irq;
 	int			dma_ch;
 	u32			poly;
-	volatile struct crc_register *regs;
+	struct crc_register	*regs;
 
 	struct ahash_request	*req; /* current request in operation */
 	struct dma_desc_array	*sg_cpu; /* virt addr of sg dma descriptors */
 	dma_addr_t		sg_dma; /* phy addr of sg dma descriptors */
 	u8			*sg_mid_buf;
+	dma_addr_t		sg_mid_dma; /* phy addr of sg mid buffer */
 
 	struct tasklet_struct	done_task;
 	struct crypto_queue	queue; /* waiting requests */
@@ -132,13 +134,13 @@ static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nent
 
 static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
 {
-	crc->regs->datacntrld = 0;
-	crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET;
-	crc->regs->curresult = key;
+	writel(0, &crc->regs->datacntrld);
+	writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
+	writel(key, &crc->regs->curresult);
 
 	/* setup CRC interrupts */
-	crc->regs->status = CMPERRI | DCNTEXPI;
-	crc->regs->intrenset = CMPERRI | DCNTEXPI;
+	writel(CMPERRI | DCNTEXPI, &crc->regs->status);
+	writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);
 
 	return 0;
 }
@@ -194,7 +196,6 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
 	dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
 
 	for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
-		dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
 		dma_addr = sg_dma_address(sg);
 		/* deduce extra bytes in last sg */
 		if (sg_is_last(sg))
@@ -207,12 +208,29 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
 			   bytes in current sg buffer. Move addr of current
 			   sg and deduce the length of current sg.
 			 */
-			memcpy(crc->sg_mid_buf +((i-1) << 2) + mid_dma_count,
-				(void *)dma_addr,
+			memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
+				sg_virt(sg),
 				CHKSUM_DIGEST_SIZE - mid_dma_count);
 			dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
 			dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
+
+			dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
+				DMAEN | PSIZE_32 | WDSIZE_32;
+
+			/* setup new dma descriptor for next middle dma */
+			crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
+			crc->sg_cpu[i].cfg = dma_config;
+			crc->sg_cpu[i].x_count = 1;
+			crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
+			dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
+				"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
+				i, crc->sg_cpu[i].start_addr,
+				crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
+				crc->sg_cpu[i].x_modify);
+			i++;
 		}
 
+		dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
 		/* chop current sg dma len to multiple of 32 bits */
 		mid_dma_count = dma_count % 4;
 		dma_count &= ~0x3;
@@ -243,24 +261,9 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
 
 	if (mid_dma_count) {
 		/* copy extra bytes to next middle dma buffer */
-		dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
-				DMAEN | PSIZE_32 | WDSIZE_32;
 		memcpy(crc->sg_mid_buf + (i << 2),
-			(void *)(dma_addr + (dma_count << 2)),
+			(u8*)sg_virt(sg) + (dma_count << 2),
 			mid_dma_count);
-		/* setup new dma descriptor for next middle dma */
-		crc->sg_cpu[i].start_addr = dma_map_single(crc->dev,
-				crc->sg_mid_buf + (i << 2),
-				CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
-		crc->sg_cpu[i].cfg = dma_config;
-		crc->sg_cpu[i].x_count = 1;
-		crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
-		dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
-			"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
-			i, crc->sg_cpu[i].start_addr,
-			crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
-			crc->sg_cpu[i].x_modify);
-		i++;
 	}
 }
 
@@ -303,6 +306,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
 	int nsg, i, j;
 	unsigned int nextlen;
 	unsigned long flags;
+	u32 reg;
 
 	spin_lock_irqsave(&crc->lock, flags);
 	if (req)
@@ -402,13 +406,14 @@ finish_update:
 		ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
 
 	/* set CRC data count before start DMA */
-	crc->regs->datacnt = ctx->sg_buflen >> 2;
+	writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);
 
 	/* setup and enable CRC DMA */
 	bfin_crypto_crc_config_dma(crc);
 
 	/* finally kick off CRC operation */
-	crc->regs->control |= BLKEN;
+	reg = readl(&crc->regs->control);
+	writel(reg | BLKEN, &crc->regs->control);
 
 	return -EINPROGRESS;
 }
@@ -529,14 +534,17 @@ static void bfin_crypto_crc_done_task(unsigned long data)
 static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
 {
 	struct bfin_crypto_crc *crc = dev_id;
+	u32 reg;
 
-	if (crc->regs->status & DCNTEXP) {
-		crc->regs->status = DCNTEXP;
+	if (readl(&crc->regs->status) & DCNTEXP) {
+		writel(DCNTEXP, &crc->regs->status);
 
 		/* prepare results */
-		put_unaligned_le32(crc->regs->result, crc->req->result);
+		put_unaligned_le32(readl(&crc->regs->result),
+			crc->req->result);
 
-		crc->regs->control &= ~BLKEN;
+		reg = readl(&crc->regs->control);
+		writel(reg & ~BLKEN, &crc->regs->control);
 		crc->busy = 0;
 
 		if (crc->req->base.complete)
@@ -560,7 +568,7 @@ static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t st
 	struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
 	int i = 100000;
 
-	while ((crc->regs->control & BLKEN) && --i)
+	while ((readl(&crc->regs->control) & BLKEN) && --i)
 		cpu_relax();
 
 	if (i == 0)
@@ -647,29 +655,32 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
 	 * 1 last + 1 next dma descriptors
 	 */
 	crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
+	crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
+			* ((CRC_MAX_DMA_DESC + 1) << 1);
 
-	crc->regs->control = 0;
-	crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
+	writel(0, &crc->regs->control);
+	crc->poly = (u32)pdev->dev.platform_data;
+	writel(crc->poly, &crc->regs->poly);
 
-	while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
+	while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
 		cpu_relax();
 
 	if (timeout == 0)
 		dev_info(&pdev->dev, "init crc poly timeout\n");
 
+	platform_set_drvdata(pdev, crc);
+
 	spin_lock(&crc_list.lock);
 	list_add(&crc->list, &crc_list.dev_list);
 	spin_unlock(&crc_list.lock);
 
-	platform_set_drvdata(pdev, crc);
-
-	ret = crypto_register_ahash(&algs);
-	if (ret) {
-		spin_lock(&crc_list.lock);
-		list_del(&crc->list);
-		spin_unlock(&crc_list.lock);
-		dev_err(&pdev->dev, "Cann't register crypto ahash device\n");
-		goto out_error_dma;
+	if (list_is_singular(&crc_list.dev_list)) {
+		ret = crypto_register_ahash(&algs);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Can't register crypto ahash device\n");
+			goto out_error_dma;
+		}
 	}
 
 	dev_info(&pdev->dev, "initialized\n");
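Most of the bfin_crc changes above replace loads and stores through a volatile struct crc_register pointer with the readl()/writel() MMIO accessors, which carry the ordering and byte-order guarantees that a plain volatile dereference does not. A minimal sketch of the read-modify-write pattern the driver now uses (the register layout here is illustrative, not the driver's real one):

	#include <linux/io.h>

	/* Illustrative register block; the real one is struct crc_register
	 * in bfin_crc.h. */
	struct example_regs {
		u32 control;
		u32 status;
	};

	static void example_start(struct example_regs __iomem *regs, u32 enable)
	{
		u32 reg;

		/* read-modify-write through the accessors, never through a
		 * plain pointer dereference */
		reg = readl(&regs->control);
		writel(reg | enable, &regs->control);
	}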
diff --git a/drivers/crypto/bfin_crc.h b/drivers/crypto/bfin_crc.h
new file mode 100644
index 000000000000..75cef4dc85a1
--- /dev/null
+++ b/drivers/crypto/bfin_crc.h
@@ -0,0 +1,125 @@
+/*
+ * bfin_crc.h - interface to Blackfin CRC controllers
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_CRC_H__
+#define __BFIN_CRC_H__
+
+/* Function driver which use hardware crc must initialize the structure */
+struct crc_info {
+	/* Input data address */
+	unsigned char *in_addr;
+	/* Output data address */
+	unsigned char *out_addr;
+	/* Input or output bytes */
+	unsigned long datasize;
+	union {
+	/* CRC to compare with that of input buffer */
+	unsigned long crc_compare;
+	/* Value to compare with input data */
+	unsigned long val_verify;
+	/* Value to fill */
+	unsigned long val_fill;
+	};
+	/* Value to program the 32b CRC Polynomial */
+	unsigned long crc_poly;
+	union {
+	/* CRC calculated from the input data */
+	unsigned long crc_result;
+	/* First failed position to verify input data */
+	unsigned long pos_verify;
+	};
+	/* CRC mirror flags */
+	unsigned int bitmirr:1;
+	unsigned int bytmirr:1;
+	unsigned int w16swp:1;
+	unsigned int fdsel:1;
+	unsigned int rsltmirr:1;
+	unsigned int polymirr:1;
+	unsigned int cmpmirr:1;
+};
+
+/* Userspace interface */
+#define CRC_IOC_MAGIC		'C'
+#define CRC_IOC_CALC_CRC	_IOWR('C', 0x01, unsigned int)
+#define CRC_IOC_MEMCPY_CRC	_IOWR('C', 0x02, unsigned int)
+#define CRC_IOC_VERIFY_VAL	_IOWR('C', 0x03, unsigned int)
+#define CRC_IOC_FILL_VAL	_IOWR('C', 0x04, unsigned int)
+
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+
+struct crc_register {
+	u32 control;
+	u32 datacnt;
+	u32 datacntrld;
+	u32 __pad_1[2];
+	u32 compare;
+	u32 fillval;
+	u32 datafifo;
+	u32 intren;
+	u32 intrenset;
+	u32 intrenclr;
+	u32 poly;
+	u32 __pad_2[4];
+	u32 status;
+	u32 datacntcap;
+	u32 __pad_3;
+	u32 result;
+	u32 curresult;
+	u32 __pad_4[3];
+	u32 revid;
+};
+
+/* CRC_STATUS Masks */
+#define CMPERR			0x00000002	/* Compare error */
+#define DCNTEXP			0x00000010	/* datacnt register expired */
+#define IBR			0x00010000	/* Input buffer ready */
+#define OBR			0x00020000	/* Output buffer ready */
+#define IRR			0x00040000	/* Immediate result readt */
+#define LUTDONE			0x00080000	/* Look-up table generation done */
+#define FSTAT			0x00700000	/* FIFO status */
+#define MAX_FIFO		4		/* Max fifo size */
+
+/* CRC_CONTROL Masks */
+#define BLKEN			0x00000001	/* Block enable */
+#define OPMODE			0x000000F0	/* Operation mode */
+#define OPMODE_OFFSET		4		/* Operation mode mask offset*/
+#define MODE_DMACPY_CRC		1		/* MTM CRC compute and compare */
+#define MODE_DATA_FILL		2		/* MTM data fill */
+#define MODE_CALC_CRC		3		/* MSM CRC compute and compare */
+#define MODE_DATA_VERIFY	4		/* MSM data verify */
+#define AUTOCLRZ		0x00000100	/* Auto clear to zero */
+#define AUTOCLRF		0x00000200	/* Auto clear to one */
+#define OBRSTALL		0x00001000	/* Stall on output buffer ready */
+#define IRRSTALL		0x00002000	/* Stall on immediate result ready */
+#define BITMIRR			0x00010000	/* Mirror bits within each byte of 32-bit input data */
+#define BITMIRR_OFFSET		16		/* Mirror bits offset */
+#define BYTMIRR			0x00020000	/* Mirror bytes of 32-bit input data */
+#define BYTMIRR_OFFSET		17		/* Mirror bytes offset */
+#define W16SWP			0x00040000	/* Mirror uppper and lower 16-bit word of 32-bit input data */
+#define W16SWP_OFFSET		18		/* Mirror 16-bit word offset */
+#define FDSEL			0x00080000	/* FIFO is written after input data is mirrored */
+#define FDSEL_OFFSET		19		/* Mirror FIFO offset */
+#define RSLTMIRR		0x00100000	/* CRC result registers are mirrored. */
+#define RSLTMIRR_OFFSET		20		/* Mirror CRC result offset. */
+#define POLYMIRR		0x00200000	/* CRC poly register is mirrored. */
+#define POLYMIRR_OFFSET		21		/* Mirror CRC poly offset. */
+#define CMPMIRR			0x00400000	/* CRC compare register is mirrored. */
+#define CMPMIRR_OFFSET		22		/* Mirror CRC compare offset. */
+
+/* CRC_INTREN Masks */
+#define CMPERRI			0x02		/* CRC_ERROR_INTR */
+#define DCNTEXPI		0x10		/* CRC_STATUS_INTR */
+
+#endif
+
+#endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 5f891254db73..c09ce1f040d3 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -303,6 +303,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
@@ -472,6 +473,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen <=
 	    CAAM_DESC_BYTES_MAX)
@@ -527,6 +529,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen <=
 	    CAAM_DESC_BYTES_MAX)
@@ -918,11 +921,8 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	edesc = (struct aead_edesc *)((char *)desc -
 		 offsetof(struct aead_edesc, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	aead_unmap(jrdev, edesc, req);
 
@@ -969,11 +969,8 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       req->cryptlen - ctx->authsize, 1);
 #endif
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	aead_unmap(jrdev, edesc, req);
 
@@ -1018,11 +1015,8 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	edesc = (struct ablkcipher_edesc *)((char *)desc -
 		 offsetof(struct ablkcipher_edesc, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
@@ -1053,11 +1047,8 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ablkcipher_edesc *)((char *)desc -
 		 offsetof(struct ablkcipher_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
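The three added keys_fit_inline = false statements fix a stale-flag bug: the encrypt, decrypt and givencrypt shared descriptors are built in sequence from the same local flag, and each length check only ever sets the flag to true, so a value left over from the encrypt path could wrongly mark keys as inline for the later descriptors. A self-contained illustration of the bug class (the limit and sizes are made up):

	#include <stdbool.h>
	#include <stdio.h>

	#define DESC_BYTES_MAX	256	/* illustrative limit */

	int main(void)
	{
		bool keys_fit_inline = false;
		unsigned int enc_len = 200, dec_len = 300;

		if (enc_len <= DESC_BYTES_MAX)
			keys_fit_inline = true;	/* encrypt descriptor: fits */

		keys_fit_inline = false;	/* the fix: reset per descriptor */
		if (dec_len <= DESC_BYTES_MAX)
			keys_fit_inline = true;	/* decrypt: does not fit */

		/* prints 0; without the reset it would wrongly print 1 */
		printf("decrypt keys_fit_inline = %d\n", keys_fit_inline);
		return 0;
	}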
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0378328f47a7..0d9284ef96a8 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -545,7 +545,8 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto map_err;
 	}
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
@@ -559,6 +560,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 				 DMA_TO_DEVICE);
 	}
 
+map_err:
 	kfree(hashed_key);
 	return ret;
 badkey:
@@ -631,11 +633,8 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ahash_edesc *)((char *)desc -
 		 offsetof(struct ahash_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap(jrdev, edesc, req, digestsize);
 	kfree(edesc);
@@ -669,11 +668,8 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ahash_edesc *)((char *)desc -
 		 offsetof(struct ahash_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
 	kfree(edesc);
@@ -707,11 +703,8 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ahash_edesc *)((char *)desc -
 		 offsetof(struct ahash_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
 	kfree(edesc);
@@ -745,11 +738,8 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ahash_edesc *)((char *)desc -
 		 offsetof(struct ahash_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
 	kfree(edesc);
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 3529b54048c9..8c07d3153f12 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -103,11 +103,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
 	bd = (struct buf_data *)((char *)desc -
 	      offsetof(struct buf_data, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	atomic_set(&bd->empty, BUF_NOT_EMPTY);
 	complete(&bd->filled);
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 0eabd81e1a90..6531054a44c8 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -11,247 +11,208 @@
11#include "jr.h" 11#include "jr.h"
12#include "error.h" 12#include "error.h"
13 13
14#define SPRINTFCAT(str, format, param, max_alloc) \ 14static const struct {
15{ \ 15 u8 value;
16 char *tmp; \ 16 const char *error_text;
17 \ 17} desc_error_list[] = {
18 tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ 18 { 0x00, "No error." },
19 if (likely(tmp)) { \ 19 { 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." },
20 sprintf(tmp, format, param); \ 20 { 0x02, "SGT Null Entry Error." },
21 strcat(str, tmp); \ 21 { 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." },
22 kfree(tmp); \ 22 { 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." },
23 } else { \ 23 { 0x05, "Reserved." },
24 strcat(str, "kmalloc failure in SPRINTFCAT"); \ 24 { 0x06, "Invalid KEY Command" },
25 } \ 25 { 0x07, "Invalid LOAD Command" },
26} 26 { 0x08, "Invalid STORE Command" },
27 27 { 0x09, "Invalid OPERATION Command" },
28static void report_jump_idx(u32 status, char *outstr) 28 { 0x0A, "Invalid FIFO LOAD Command" },
29 { 0x0B, "Invalid FIFO STORE Command" },
30 { 0x0C, "Invalid MOVE/MOVE_LEN Command" },
31 { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." },
32 { 0x0E, "Invalid MATH Command" },
33 { 0x0F, "Invalid SIGNATURE Command" },
34 { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." },
35 { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
36 { 0x12, "Shared Descriptor Header Error" },
37 { 0x13, "Header Error. Invalid length or parity, or certain other problems." },
38 { 0x14, "Burster Error. Burster has gotten to an illegal state" },
39 { 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." },
40 { 0x16, "DMA Error" },
41 { 0x17, "Reserved." },
42 { 0x1A, "Job failed due to JR reset" },
43 { 0x1B, "Job failed due to Fail Mode" },
44 { 0x1C, "DECO Watchdog timer timeout error" },
45 { 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" },
46 { 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" },
47 { 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." },
48 { 0x20, "DECO has completed a reset initiated via the DRR register" },
49 { 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." },
50 { 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." },
51 { 0x23, "Read Input Frame error" },
52 { 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
53 { 0x80, "DNR (do not run) error" },
54 { 0x81, "undefined protocol command" },
55 { 0x82, "invalid setting in PDB" },
56 { 0x83, "Anti-replay LATE error" },
57 { 0x84, "Anti-replay REPLAY error" },
58 { 0x85, "Sequence number overflow" },
59 { 0x86, "Sigver invalid signature" },
60 { 0x87, "DSA Sign Illegal test descriptor" },
61 { 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." },
62 { 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." },
63 { 0xC1, "Blob Command error: Undefined mode" },
64 { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
65 { 0xC4, "Blob Command error: Black Blob key or input size error" },
66 { 0xC5, "Blob Command error: Invalid key destination" },
67 { 0xC8, "Blob Command error: Trusted/Secure mode error" },
68 { 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" },
69 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
70};
71
72static const char * const cha_id_list[] = {
73 "",
74 "AES",
75 "DES",
76 "ARC4",
77 "MDHA",
78 "RNG",
79 "SNOW f8",
80 "Kasumi f8/9",
81 "PKHA",
82 "CRCA",
83 "SNOW f9",
84 "ZUCE",
85 "ZUCA",
86};
87
88static const char * const err_id_list[] = {
89 "No error.",
90 "Mode error.",
91 "Data size error.",
92 "Key size error.",
93 "PKHA A memory size error.",
94 "PKHA B memory size error.",
95 "Data arrived out of sequence error.",
96 "PKHA divide-by-zero error.",
97 "PKHA modulus even error.",
98 "DES key parity error.",
99 "ICV check failed.",
100 "Hardware error.",
101 "Unsupported CCM AAD size.",
102 "Class 1 CHA is not reset",
103 "Invalid CHA combination was selected",
104 "Invalid CHA selected.",
105};
106
107static const char * const rng_err_id_list[] = {
108 "",
109 "",
110 "",
111 "Instantiate",
112 "Not instantiated",
113 "Test instantiate",
114 "Prediction resistance",
115 "Prediction resistance and test request",
116 "Uninstantiate",
117 "Secure key generation",
118};
119
120static void report_ccb_status(struct device *jrdev, const u32 status,
121 const char *error)
29{ 122{
123 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
124 JRSTA_CCBERR_CHAID_SHIFT;
125 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
30 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> 126 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
31 JRSTA_DECOERR_INDEX_SHIFT; 127 JRSTA_DECOERR_INDEX_SHIFT;
128 char *idx_str;
129 const char *cha_str = "unidentified cha_id value 0x";
130 char cha_err_code[3] = { 0 };
131 const char *err_str = "unidentified err_id value 0x";
132 char err_err_code[3] = { 0 };
32 133
33 if (status & JRSTA_DECOERR_JUMP) 134 if (status & JRSTA_DECOERR_JUMP)
34 strcat(outstr, "jump tgt desc idx "); 135 idx_str = "jump tgt desc idx";
35 else 136 else
36 strcat(outstr, "desc idx "); 137 idx_str = "desc idx";
37
38 SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
39}
40
41static void report_ccb_status(u32 status, char *outstr)
42{
43 static const char * const cha_id_list[] = {
44 "",
45 "AES",
46 "DES",
47 "ARC4",
48 "MDHA",
49 "RNG",
50 "SNOW f8",
51 "Kasumi f8/9",
52 "PKHA",
53 "CRCA",
54 "SNOW f9",
55 "ZUCE",
56 "ZUCA",
57 };
58 static const char * const err_id_list[] = {
59 "No error.",
60 "Mode error.",
61 "Data size error.",
62 "Key size error.",
63 "PKHA A memory size error.",
64 "PKHA B memory size error.",
65 "Data arrived out of sequence error.",
66 "PKHA divide-by-zero error.",
67 "PKHA modulus even error.",
68 "DES key parity error.",
69 "ICV check failed.",
70 "Hardware error.",
71 "Unsupported CCM AAD size.",
72 "Class 1 CHA is not reset",
73 "Invalid CHA combination was selected",
74 "Invalid CHA selected.",
75 };
76 static const char * const rng_err_id_list[] = {
77 "",
78 "",
79 "",
80 "Instantiate",
81 "Not instantiated",
82 "Test instantiate",
83 "Prediction resistance",
84 "Prediction resistance and test request",
85 "Uninstantiate",
86 "Secure key generation",
87 };
88 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
89 JRSTA_CCBERR_CHAID_SHIFT;
90 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
91 138
92 report_jump_idx(status, outstr); 139 if (cha_id < ARRAY_SIZE(cha_id_list))
93 140 cha_str = cha_id_list[cha_id];
94 if (cha_id < ARRAY_SIZE(cha_id_list)) { 141 else
95 SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id], 142 snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
96 strlen(cha_id_list[cha_id]));
97 } else {
98 SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
99 cha_id, sizeof("ff"));
100 }
101 143
102 if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG && 144 if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
103 err_id < ARRAY_SIZE(rng_err_id_list) && 145 err_id < ARRAY_SIZE(rng_err_id_list) &&
104 strlen(rng_err_id_list[err_id])) { 146 strlen(rng_err_id_list[err_id])) {
105 /* RNG-only error */ 147 /* RNG-only error */
106 SPRINTFCAT(outstr, "%s", rng_err_id_list[err_id], 148 err_str = rng_err_id_list[err_id];
107 strlen(rng_err_id_list[err_id])); 149 } else if (err_id < ARRAY_SIZE(err_id_list))
108 } else if (err_id < ARRAY_SIZE(err_id_list)) { 150 err_str = err_id_list[err_id];
109 SPRINTFCAT(outstr, "%s", err_id_list[err_id], 151 else
110 strlen(err_id_list[err_id])); 152 snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
111 } else { 153
112 SPRINTFCAT(outstr, "unidentified err_id value 0x%02x", 154 dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
113 err_id, sizeof("ff")); 155 status, error, idx_str, idx,
114 } 156 cha_str, cha_err_code,
157 err_str, err_err_code);
115} 158}
116 159
117static void report_jump_status(u32 status, char *outstr) 160static void report_jump_status(struct device *jrdev, const u32 status,
161 const char *error)
118{ 162{
119 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); 163 dev_err(jrdev, "%08x: %s: %s() not implemented\n",
164 status, error, __func__);
120} 165}
121 166
122static void report_deco_status(u32 status, char *outstr) 167static void report_deco_status(struct device *jrdev, const u32 status,
168 const char *error)
123{ 169{
124 static const struct { 170 u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
125 u8 value; 171 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
126 char *error_text; 172 JRSTA_DECOERR_INDEX_SHIFT;
127 } desc_error_list[] = { 173 char *idx_str;
128 { 0x00, "No error." }, 174 const char *err_str = "unidentified error value 0x";
129 { 0x01, "SGT Length Error. The descriptor is trying to read " 175 char err_err_code[3] = { 0 };
130 "more data than is contained in the SGT table." },
131 { 0x02, "SGT Null Entry Error." },
132 { 0x03, "Job Ring Control Error. There is a bad value in the "
133 "Job Ring Control register." },
134 { 0x04, "Invalid Descriptor Command. The Descriptor Command "
135 "field is invalid." },
136 { 0x05, "Reserved." },
137 { 0x06, "Invalid KEY Command" },
138 { 0x07, "Invalid LOAD Command" },
139 { 0x08, "Invalid STORE Command" },
140 { 0x09, "Invalid OPERATION Command" },
141 { 0x0A, "Invalid FIFO LOAD Command" },
142 { 0x0B, "Invalid FIFO STORE Command" },
143 { 0x0C, "Invalid MOVE/MOVE_LEN Command" },
144 { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
145 "invalid because the target is not a Job Header "
146 "Command, or the jump is from a Trusted Descriptor to "
147 "a Job Descriptor, or because the target Descriptor "
148 "contains a Shared Descriptor." },
149 { 0x0E, "Invalid MATH Command" },
150 { 0x0F, "Invalid SIGNATURE Command" },
151 { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
152 "Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
153 "LOAD, or SEQ FIFO STORE decremented the input or "
154 "output sequence length below 0. This error may result "
155 "if a built-in PROTOCOL Command has encountered a "
156 "malformed PDU." },
157 { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
158 { 0x12, "Shared Descriptor Header Error" },
159 { 0x13, "Header Error. Invalid length or parity, or certain "
160 "other problems." },
161 { 0x14, "Burster Error. Burster has gotten to an illegal "
162 "state" },
163 { 0x15, "Context Register Length Error. The descriptor is "
164 "trying to read or write past the end of the Context "
165 "Register. A SEQ LOAD or SEQ STORE with the VLF bit "
166 "set was executed with too large a length in the "
167 "variable length register (VSOL for SEQ STORE or VSIL "
168 "for SEQ LOAD)." },
169 { 0x16, "DMA Error" },
170 { 0x17, "Reserved." },
171 { 0x1A, "Job failed due to JR reset" },
172 { 0x1B, "Job failed due to Fail Mode" },
173 { 0x1C, "DECO Watchdog timer timeout error" },
174 { 0x1D, "DECO tried to copy a key from another DECO but the "
175 "other DECO's Key Registers were locked" },
176 { 0x1E, "DECO attempted to copy data from a DECO that had an "
177 "unmasked Descriptor error" },
178 { 0x1F, "LIODN error. DECO was trying to share from itself or "
179 "from another DECO but the two Non-SEQ LIODN values "
180 "didn't match or the 'shared from' DECO's Descriptor "
181 "required that the SEQ LIODNs be the same and they "
182 "aren't." },
183 { 0x20, "DECO has completed a reset initiated via the DRR "
184 "register" },
185 { 0x21, "Nonce error. When using EKT (CCM) key encryption "
186 "option in the FIFO STORE Command, the Nonce counter "
187 "reached its maximum value and this encryption mode "
188 "can no longer be used." },
189 { 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
190 "(input frame; block ciphers) and IPsec decap (output "
191 "frame, when doing the next header byte update) and "
192 "DCRC (output frame)." },
193 { 0x23, "Read Input Frame error" },
194 { 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
195 { 0x80, "DNR (do not run) error" },
196 { 0x81, "undefined protocol command" },
197 { 0x82, "invalid setting in PDB" },
198 { 0x83, "Anti-replay LATE error" },
199 { 0x84, "Anti-replay REPLAY error" },
200 { 0x85, "Sequence number overflow" },
201 { 0x86, "Sigver invalid signature" },
202 { 0x87, "DSA Sign Illegal test descriptor" },
203 { 0x88, "Protocol Format Error - A protocol has seen an error "
204 "in the format of data received. When running RSA, "
205 "this means that formatting with random padding was "
206 "used, and did not follow the form: 0x00, 0x02, 8-to-N "
207 "bytes of non-zero pad, 0x00, F data." },
208 { 0x89, "Protocol Size Error - A protocol has seen an error in "
209 "size. When running RSA, pdb size N < (size of F) when "
210 "no formatting is used; or pdb size N < (F + 11) when "
211 "formatting is used." },
212 { 0xC1, "Blob Command error: Undefined mode" },
213 { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
214 { 0xC4, "Blob Command error: Black Blob key or input size "
215 "error" },
216 { 0xC5, "Blob Command error: Invalid key destination" },
217 { 0xC8, "Blob Command error: Trusted/Secure mode error" },
218 { 0xF0, "IPsec TTL or hop limit field either came in as 0, "
219 "or was decremented to 0" },
220 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
221 };
222 u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
223 int i; 176 int i;
224 177
225 report_jump_idx(status, outstr); 178 if (status & JRSTA_DECOERR_JUMP)
179 idx_str = "jump tgt desc idx";
180 else
181 idx_str = "desc idx";
226 182
227 for (i = 0; i < ARRAY_SIZE(desc_error_list); i++) 183 for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
228 if (desc_error_list[i].value == desc_error) 184 if (desc_error_list[i].value == err_id)
229 break; 185 break;
230 186
231 if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) { 187 if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
232 SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text, 188 err_str = desc_error_list[i].error_text;
233 strlen(desc_error_list[i].error_text)); 189 else
234 } else { 190 snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
235 SPRINTFCAT(outstr, "unidentified error value 0x%02x", 191
236 desc_error, sizeof("ff")); 192 dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
237 } 193 status, error, idx_str, idx, err_str, err_err_code);
238} 194}
239 195
240static void report_jr_status(u32 status, char *outstr) 196static void report_jr_status(struct device *jrdev, const u32 status,
197 const char *error)
241{ 198{
242 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); 199 dev_err(jrdev, "%08x: %s: %s() not implemented\n",
200 status, error, __func__);
243} 201}
244 202
245static void report_cond_code_status(u32 status, char *outstr) 203static void report_cond_code_status(struct device *jrdev, const u32 status,
204 const char *error)
246{ 205{
247 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); 206 dev_err(jrdev, "%08x: %s: %s() not implemented\n",
207 status, error, __func__);
248} 208}
249 209
250char *caam_jr_strstatus(char *outstr, u32 status) 210void caam_jr_strstatus(struct device *jrdev, u32 status)
251{ 211{
252 static const struct stat_src { 212 static const struct stat_src {
253 void (*report_ssed)(u32 status, char *outstr); 213 void (*report_ssed)(struct device *jrdev, const u32 status,
254 char *error; 214 const char *error);
215 const char *error;
255 } status_src[] = { 216 } status_src[] = {
256 { NULL, "No error" }, 217 { NULL, "No error" },
257 { NULL, NULL }, 218 { NULL, NULL },
@@ -263,12 +224,16 @@ char *caam_jr_strstatus(char *outstr, u32 status)
263 { report_cond_code_status, "Condition Code" }, 224 { report_cond_code_status, "Condition Code" },
264 }; 225 };
265 u32 ssrc = status >> JRSTA_SSRC_SHIFT; 226 u32 ssrc = status >> JRSTA_SSRC_SHIFT;
266 227 const char *error = status_src[ssrc].error;
267 sprintf(outstr, "%s: ", status_src[ssrc].error); 228
268 229 /*
269 if (status_src[ssrc].report_ssed) 230 * If there is no further error handling function, just
270 status_src[ssrc].report_ssed(status, outstr); 231 * print the error code, error string and exit. Otherwise
271 232 * call the handler function.
272 return outstr; 233 */
234 if (!status_src[ssrc].report_ssed)
235 dev_err(jrdev, "%08x: %s: \n", status, status_src[ssrc].error);
236 else
237 status_src[ssrc].report_ssed(jrdev, status, error);
273} 238}
274EXPORT_SYMBOL(caam_jr_strstatus); 239EXPORT_SYMBOL(caam_jr_strstatus);
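The effect of the new caam_jr_strstatus() signature on callers is visible throughout the caamalg, caamhash, caamrng and key_gen hunks: error decoding and printing move into the helper, so completion callbacks no longer format into an on-stack buffer. A sketch of the calling convention before and after (the callback name is illustrative):

	/* Hypothetical job-ring completion callback. */
	static void example_done(struct device *jrdev, u32 *desc, u32 err,
				 void *context)
	{
		/* Before: caller-formatted message and a 302-byte buffer.
		 *
		 *	char tmp[CAAM_ERROR_STR_MAX];
		 *
		 *	if (err)
		 *		dev_err(jrdev, "%08x: %s\n", err,
		 *			caam_jr_strstatus(tmp, err));
		 */

		/* After: one call; the helper picks the right decoder and
		 * prints via dev_err() itself. */
		if (err)
			caam_jr_strstatus(jrdev, err);
	}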
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 02c7baa1748e..b6350b0d9153 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,5 +7,5 @@
 #ifndef CAAM_ERROR_H
 #define CAAM_ERROR_H
 #define CAAM_ERROR_STR_MAX 302
-extern char *caam_jr_strstatus(char *outstr, u32 status);
+void caam_jr_strstatus(struct device *jrdev, u32 status);
 #endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index ea2e406610eb..871703c49d2c 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -19,11 +19,8 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
 	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(dev, err);
 
 	res->err = err;
 
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0237ab58f242..0cc5594b7de3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -191,12 +191,12 @@ static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
 	ctx->complete = ccp_aes_xts_complete;
 	ctx->u.aes.key_len = 0;
 
-	fallback_tfm = crypto_alloc_ablkcipher(tfm->__crt_alg->cra_name, 0,
+	fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0,
 					       CRYPTO_ALG_ASYNC |
 					       CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(fallback_tfm)) {
 		pr_warn("could not load fallback driver %s\n",
-			tfm->__crt_alg->cra_name);
+			crypto_tfm_alg_name(tfm));
 		return PTR_ERR(fallback_tfm);
 	}
 	ctx->u.aes.tfm_ablkcipher = fallback_tfm;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 93319f9db753..0d746236df5e 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -48,12 +48,11 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
 		msix_entry[v].entry = v;
 
-	while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0)
-		v = ret;
-	if (ret)
+	ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
+	if (ret < 0)
 		return ret;
 
-	ccp_pci->msix_count = v;
+	ccp_pci->msix_count = ret;
 	for (v = 0; v < ccp_pci->msix_count; v++) {
 		/* Set the interrupt names and request the irqs */
 		snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
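pci_enable_msix() could return a positive count when fewer vectors were available than requested, which is what the removed retry loop handled. pci_enable_msix_range() folds that negotiation into one call: it allocates anywhere between minvec and maxvec vectors and returns the number actually granted, or a negative errno if even the minimum cannot be met. A minimal sketch of the pattern (the function and sizes are illustrative):

	#include <linux/pci.h>

	static int example_setup_msix(struct pci_dev *pdev)
	{
		struct msix_entry entries[8];
		int i, nvec;

		for (i = 0; i < 8; i++)
			entries[i].entry = i;

		/* Accept anywhere from 1 to 8 vectors. */
		nvec = pci_enable_msix_range(pdev, entries, 1, 8);
		if (nvec < 0)
			return nvec;	/* not even one vector available */

		/* entries[0..nvec-1].vector now hold the IRQ numbers. */
		return nvec;
	}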
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 0c9ff4971724..fe538e5287a5 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -226,7 +226,7 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	op->dst = (void *) out;
 	op->mode = AES_MODE_ECB;
 	op->flags = 0;
-	op->len = AES_MIN_BLOCK_SIZE;
+	op->len = AES_BLOCK_SIZE;
 	op->dir = AES_DIR_ENCRYPT;
 
 	geode_aes_crypt(op);
@@ -247,7 +247,7 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	op->dst = (void *) out;
 	op->mode = AES_MODE_ECB;
 	op->flags = 0;
-	op->len = AES_MIN_BLOCK_SIZE;
+	op->len = AES_BLOCK_SIZE;
 	op->dir = AES_DIR_DECRYPT;
 
 	geode_aes_crypt(op);
@@ -255,7 +255,7 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 static int fallback_init_cip(struct crypto_tfm *tfm)
 {
-	const char *name = tfm->__crt_alg->cra_name;
+	const char *name = crypto_tfm_alg_name(tfm);
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 
 	op->fallback.cip = crypto_alloc_cipher(name, 0,
@@ -286,7 +286,7 @@ static struct crypto_alg geode_alg = {
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_init		= fallback_init_cip,
 	.cra_exit		= fallback_exit_cip,
-	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
+	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct geode_aes_op),
 	.cra_module		= THIS_MODULE,
 	.cra_u			= {
@@ -320,7 +320,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_CBC;
-		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
 		op->dir = AES_DIR_DECRYPT;
 
 		ret = geode_aes_crypt(op);
@@ -352,7 +352,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_CBC;
-		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
 		op->dir = AES_DIR_ENCRYPT;
 
 		ret = geode_aes_crypt(op);
@@ -365,7 +365,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 
 static int fallback_init_blk(struct crypto_tfm *tfm)
 {
-	const char *name = tfm->__crt_alg->cra_name;
+	const char *name = crypto_tfm_alg_name(tfm);
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 
 	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
@@ -396,7 +396,7 @@ static struct crypto_alg geode_cbc_alg = {
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_init		= fallback_init_blk,
 	.cra_exit		= fallback_exit_blk,
-	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
+	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct geode_aes_op),
 	.cra_alignmask		= 15,
 	.cra_type		= &crypto_blkcipher_type,
@@ -408,7 +408,7 @@ static struct crypto_alg geode_cbc_alg = {
 			.setkey		= geode_setkey_blk,
 			.encrypt	= geode_cbc_encrypt,
 			.decrypt	= geode_cbc_decrypt,
-			.ivsize		= AES_IV_LENGTH,
+			.ivsize		= AES_BLOCK_SIZE,
 		}
 	}
 };
@@ -432,7 +432,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_ECB;
-		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
 		op->dir = AES_DIR_DECRYPT;
 
 		ret = geode_aes_crypt(op);
@@ -462,7 +462,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_ECB;
-		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
 		op->dir = AES_DIR_ENCRYPT;
 
 		ret = geode_aes_crypt(op);
@@ -482,7 +482,7 @@ static struct crypto_alg geode_ecb_alg = {
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_init		= fallback_init_blk,
 	.cra_exit		= fallback_exit_blk,
-	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
+	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct geode_aes_op),
 	.cra_alignmask		= 15,
 	.cra_type		= &crypto_blkcipher_type,
@@ -547,7 +547,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	if (ret)
 		goto eecb;
 
-	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
+	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
 	return 0;
 
  eecb:
@@ -565,7 +565,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
  eenable:
 	pci_disable_device(dev);
 
-	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
+	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
 	return ret;
 }
 
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
index f1855b50da48..f442ca972e3c 100644
--- a/drivers/crypto/geode-aes.h
+++ b/drivers/crypto/geode-aes.h
@@ -10,10 +10,6 @@
 #define _GEODE_AES_H_
 
 /* driver logic flags */
-#define AES_IV_LENGTH		16
-#define AES_KEY_LENGTH		16
-#define AES_MIN_BLOCK_SIZE	16
-
 #define AES_MODE_ECB		0
 #define AES_MODE_CBC		1
 
@@ -64,7 +60,7 @@ struct geode_aes_op {
 	u32 flags;
 	int len;
 
-	u8 key[AES_KEY_LENGTH];
+	u8 key[AES_KEYSIZE_128];
 	u8 *iv;
 
 	union {
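
The three driver-local macros duplicated values that <crypto/aes.h> already defines once for every AES user, and since an AES IV is exactly one block, AES_BLOCK_SIZE serves for .ivsize as well. For reference, the shared constants the header now relies on:

/* From <crypto/aes.h>: */
#define AES_MIN_KEY_SIZE	16
#define AES_MAX_KEY_SIZE	32
#define AES_KEYSIZE_128		16
#define AES_KEYSIZE_192		24
#define AES_KEYSIZE_256		32
#define AES_BLOCK_SIZE		16
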
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 8d1e6f8e9e9c..29d0ee504907 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -622,8 +622,8 @@ static int queue_manag(void *data)
 		}
 
 		if (async_req) {
-			if (async_req->tfm->__crt_alg->cra_type !=
-			    &crypto_ahash_type) {
+			if (crypto_tfm_alg_type(async_req->tfm) !=
+			    CRYPTO_ALG_TYPE_AHASH) {
 				struct ablkcipher_request *req =
 					ablkcipher_request_cast(async_req);
 				mv_start_new_crypt_req(req);
@@ -843,7 +843,7 @@ static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
 static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
 			    enum hash_op op, int count_add)
 {
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct crypto_shash *fallback_tfm = NULL;
 	struct crypto_shash *base_hash = NULL;
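
The queue_manag() change replaces a comparison against the &crypto_ahash_type descriptor pointer with a check of the type bits that crypto_tfm_alg_type() extracts from cra_flags, which works uniformly for any algorithm type. A minimal sketch of that dispatch; handle_cipher() and handle_hash() are hypothetical stand-ins for the driver's real start routines:

#include <linux/crypto.h>

static void handle_cipher(struct ablkcipher_request *req) { /* start cipher op */ }
static void handle_hash(struct crypto_async_request *req) { /* start hash op */ }

static void dispatch_completed(struct crypto_async_request *async_req)
{
	/* crypto_tfm_alg_type() is cra_flags & CRYPTO_ALG_TYPE_MASK. */
	if (crypto_tfm_alg_type(async_req->tfm) != CRYPTO_ALG_TYPE_AHASH)
		handle_cipher(ablkcipher_request_cast(async_req));
	else
		handle_hash(async_req);
}
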
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 7bbe0ab21eca..b5f7e6db24d4 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -104,7 +104,6 @@ struct dcp_sha_req_ctx {
  * design of Linux Crypto API.
  */
 static struct dcp *global_sdcp;
-static DEFINE_MUTEX(global_mutex);
 
 /* DCP register layout. */
 #define MXS_DCP_CTRL			0x00
@@ -482,7 +481,7 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 
 static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
 {
-	const char *name = tfm->__crt_alg->cra_name;
+	const char *name = crypto_tfm_alg_name(tfm);
 	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
 	struct crypto_ablkcipher *blk;
@@ -907,60 +906,49 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	struct resource *iores;
 	int dcp_vmi_irq, dcp_irq;
 
-	mutex_lock(&global_mutex);
 	if (global_sdcp) {
 		dev_err(dev, "Only one DCP instance allowed!\n");
-		ret = -ENODEV;
-		goto err_mutex;
+		return -ENODEV;
 	}
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dcp_vmi_irq = platform_get_irq(pdev, 0);
-	if (dcp_vmi_irq < 0) {
-		ret = dcp_vmi_irq;
-		goto err_mutex;
-	}
+	if (dcp_vmi_irq < 0)
+		return dcp_vmi_irq;
 
 	dcp_irq = platform_get_irq(pdev, 1);
-	if (dcp_irq < 0) {
-		ret = dcp_irq;
-		goto err_mutex;
-	}
+	if (dcp_irq < 0)
+		return dcp_irq;
 
 	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
-	if (!sdcp) {
-		ret = -ENOMEM;
-		goto err_mutex;
-	}
+	if (!sdcp)
		return -ENOMEM;
 
 	sdcp->dev = dev;
 	sdcp->base = devm_ioremap_resource(dev, iores);
-	if (IS_ERR(sdcp->base)) {
-		ret = PTR_ERR(sdcp->base);
-		goto err_mutex;
-	}
+	if (IS_ERR(sdcp->base))
+		return PTR_ERR(sdcp->base);
+
 
 	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
 			       "dcp-vmi-irq", sdcp);
 	if (ret) {
 		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
-		goto err_mutex;
+		return ret;
 	}
 
 	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
 			       "dcp-irq", sdcp);
 	if (ret) {
 		dev_err(dev, "Failed to claim DCP IRQ!\n");
-		goto err_mutex;
+		return ret;
 	}
 
 	/* Allocate coherent helper block. */
 	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
 				 GFP_KERNEL);
-	if (!sdcp->coh) {
-		ret = -ENOMEM;
-		goto err_mutex;
-	}
+	if (!sdcp->coh)
+		return -ENOMEM;
 
 	/* Re-align the structure so it fits the DCP constraints. */
 	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
@@ -968,7 +956,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	/* Restart the DCP block. */
 	ret = stmp_reset_block(sdcp->base);
 	if (ret)
-		goto err_mutex;
+		return ret;
 
 	/* Initialize control register. */
 	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
@@ -1006,8 +994,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 					    NULL, "mxs_dcp_chan/sha");
 	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
 		dev_err(dev, "Error starting SHA thread!\n");
-		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
-		goto err_mutex;
+		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
 	}
 
 	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1064,9 +1051,6 @@ err_destroy_aes_thread:
 
 err_destroy_sha_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
-
-err_mutex:
-	mutex_unlock(&global_mutex);
 	return ret;
 }
 
@@ -1088,9 +1072,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, NULL);
 
-	mutex_lock(&global_mutex);
 	global_sdcp = NULL;
-	mutex_unlock(&global_mutex);
 
 	return 0;
 }
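
Every resource in this probe path is devm-managed, so each failure site can simply return: memory from devm_kzalloc(), the MMIO mapping from devm_ioremap_resource() and both IRQs are released automatically when probe fails. That, plus the fact that the driver core does not run probe() and remove() for the same driver concurrently, is what makes the mutex around the global_sdcp singleton check removable. A condensed sketch of the resulting shape, reusing names from the mxs-dcp code above but not reproducing it exactly:

static int sketch_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp;
	int dcp_irq, ret;

	if (global_sdcp)		/* probe() calls are serialized */
		return -ENODEV;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;		/* nothing allocated yet */

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;		/* nothing to unwind either */

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0, "dcp-irq", sdcp);
	if (ret)
		return ret;		/* devm frees the IRQ and memory */

	global_sdcp = sdcp;
	return 0;
}
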
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index e1f0ab413c3b..7263c10a56ee 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -356,7 +356,7 @@ static int n2_hash_async_finup(struct ahash_request *req)
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
 {
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct crypto_ahash *fallback_tfm;
@@ -391,7 +391,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 
 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
 {
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 5ce8b5765121..502edf0a2933 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1229,7 +1229,7 @@ static int __exit nx842_remove(struct vio_dev *viodev)
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
 	of_reconfig_notifier_unregister(&nx842_of_nb);
-	rcu_assign_pointer(devdata, NULL);
+	RCU_INIT_POINTER(devdata, NULL);
 	spin_unlock_irqrestore(&devdata_mutex, flags);
 	synchronize_rcu();
 	dev_set_drvdata(&viodev->dev, NULL);
@@ -1280,7 +1280,7 @@ static void __exit nx842_exit(void)
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
-	rcu_assign_pointer(devdata, NULL);
+	RCU_INIT_POINTER(devdata, NULL);
 	spin_unlock_irqrestore(&devdata_mutex, flags);
 	synchronize_rcu();
 	if (old_devdata)
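
rcu_assign_pointer() orders all prior initialization of the pointed-to object before the pointer is published, so concurrent readers can never see a half-built structure. When the value being published is NULL there is no object for readers to look into, so the barrier buys nothing and RCU_INIT_POINTER() can be used instead. The teardown pattern above, condensed (the real nx-842 code frees old_devdata's members conditionally):

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);	/* NULL publish: no barrier needed */
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();			/* wait out pre-existing readers */
	kfree(old_devdata);			/* now nobody can still hold it */
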
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index ec5f13162b73..b8bc84be8741 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -223,12 +223,19 @@ static void omap_des_write_n(struct omap_des_dev *dd, u32 offset,
 
 static int omap_des_hw_init(struct omap_des_dev *dd)
 {
+	int err;
+
 	/*
 	 * clocks are enabled when request starts and disabled when finished.
 	 * It may be long delays between requests.
 	 * Device might go to off mode to save power.
 	 */
-	pm_runtime_get_sync(dd->dev);
+	err = pm_runtime_get_sync(dd->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dd->dev);
+		dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		return err;
+	}
 
 	if (!(dd->flags & FLAGS_INIT)) {
 		dd->flags |= FLAGS_INIT;
@@ -1074,16 +1081,20 @@ static int omap_des_probe(struct platform_device *pdev)
 	if (err)
 		goto err_res;
 
-	dd->io_base = devm_request_and_ioremap(dev, res);
-	if (!dd->io_base) {
-		dev_err(dev, "can't ioremap\n");
-		err = -ENOMEM;
+	dd->io_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dd->io_base)) {
+		err = PTR_ERR(dd->io_base);
 		goto err_res;
 	}
 	dd->phys_base = res->start;
 
 	pm_runtime_enable(dev);
-	pm_runtime_get_sync(dev);
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dev);
+		dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		goto err_get;
+	}
 
 	omap_des_dma_stop(dd);
 
@@ -1148,6 +1159,7 @@ err_algs:
 err_irq:
 	tasklet_kill(&dd->done_task);
 	tasklet_kill(&dd->queue_task);
+err_get:
 	pm_runtime_disable(dev);
 err_res:
 	dd = NULL;
@@ -1191,7 +1203,14 @@ static int omap_des_suspend(struct device *dev)
 
 static int omap_des_resume(struct device *dev)
 {
-	pm_runtime_get_sync(dev);
+	int err;
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dev);
+		dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		return err;
+	}
 	return 0;
 }
 #endif
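
pm_runtime_get_sync() increments the device's usage count before attempting the resume, and the count stays incremented even when the resume fails. An unchecked call therefore not only misses the error; a failed one also leaves the count unbalanced, so the device can never runtime-suspend again. Hence every failure is paired with pm_runtime_put_noidle(), as at all three omap-des call sites above:

	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		/* get_sync() bumped the usage count even though it failed */
		pm_runtime_put_noidle(dev);
		dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err);
		return err;
	}
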
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 9266c0e25492..bace885634f2 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -211,7 +211,7 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
 static int padlock_cra_init(struct crypto_tfm *tfm)
 {
 	struct crypto_shash *hash = __crypto_shash_cast(tfm);
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct crypto_shash *fallback_tfm;
 	int err = -ENOMEM;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index be45762f390a..4197ad9a711b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/crypto.h>
 #include <linux/interrupt.h>
 
@@ -29,9 +30,6 @@
 #include <crypto/aes.h>
 #include <crypto/ctr.h>
 
-#include <plat/cpu.h>
-#include <mach/dma.h>
-
 #define _SBF(s, v)			((v) << (s))
 #define _BIT(b)				_SBF(b, 1)
 
@@ -105,7 +103,7 @@
 #define SSS_REG_FCPKDMAO		0x005C
 
 /* AES registers */
-#define SSS_REG_AES_CONTROL		0x4000
+#define SSS_REG_AES_CONTROL		0x00
 #define SSS_AES_BYTESWAP_DI		_BIT(11)
 #define SSS_AES_BYTESWAP_DO		_BIT(10)
 #define SSS_AES_BYTESWAP_IV		_BIT(9)
@@ -121,21 +119,25 @@
 #define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
 #define SSS_AES_MODE_DECRYPT		_BIT(0)
 
-#define SSS_REG_AES_STATUS		0x4004
+#define SSS_REG_AES_STATUS		0x04
 #define SSS_AES_BUSY			_BIT(2)
 #define SSS_AES_INPUT_READY		_BIT(1)
 #define SSS_AES_OUTPUT_READY		_BIT(0)
 
-#define SSS_REG_AES_IN_DATA(s)		(0x4010 + (s << 2))
-#define SSS_REG_AES_OUT_DATA(s)		(0x4020 + (s << 2))
-#define SSS_REG_AES_IV_DATA(s)		(0x4030 + (s << 2))
-#define SSS_REG_AES_CNT_DATA(s)		(0x4040 + (s << 2))
-#define SSS_REG_AES_KEY_DATA(s)		(0x4080 + (s << 2))
+#define SSS_REG_AES_IN_DATA(s)		(0x10 + (s << 2))
+#define SSS_REG_AES_OUT_DATA(s)		(0x20 + (s << 2))
+#define SSS_REG_AES_IV_DATA(s)		(0x30 + (s << 2))
+#define SSS_REG_AES_CNT_DATA(s)		(0x40 + (s << 2))
+#define SSS_REG_AES_KEY_DATA(s)		(0x80 + (s << 2))
 
 #define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
 #define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
 #define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))
 
+#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
+#define SSS_AES_WRITE(dev, reg, val)	__raw_writel((val), \
+						SSS_AES_REG(dev, reg))
+
 /* HW engine modes */
 #define FLAGS_AES_DECRYPT		_BIT(0)
 #define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
@@ -145,6 +147,20 @@
 #define AES_KEY_LEN			16
 #define CRYPTO_QUEUE_LEN		1
 
+/**
+ * struct samsung_aes_variant - platform specific SSS driver data
+ * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
+ * @aes_offset: AES register offset from SSS module's base.
+ *
+ * Specifies platform specific configuration of SSS module.
+ * Note: A structure for driver specific platform data is used for future
+ * expansion of its usage.
+ */
+struct samsung_aes_variant {
+	bool			    has_hash_irq;
+	unsigned int		    aes_offset;
+};
+
 struct s5p_aes_reqctx {
 	unsigned long mode;
 };
@@ -161,6 +177,7 @@ struct s5p_aes_dev {
 	struct device		    *dev;
 	struct clk		    *clk;
 	void __iomem		    *ioaddr;
+	void __iomem		    *aes_ioaddr;
 	int			    irq_hash;
 	int			    irq_fc;
 
@@ -173,10 +190,48 @@ struct s5p_aes_dev {
 	struct crypto_queue	    queue;
 	bool			    busy;
 	spinlock_t		    lock;
+
+	struct samsung_aes_variant *variant;
 };
 
 static struct s5p_aes_dev *s5p_dev;
 
+static const struct samsung_aes_variant s5p_aes_data = {
+	.has_hash_irq	= true,
+	.aes_offset	= 0x4000,
+};
+
+static const struct samsung_aes_variant exynos_aes_data = {
+	.has_hash_irq	= false,
+	.aes_offset	= 0x200,
+};
+
+static const struct of_device_id s5p_sss_dt_match[] = {
+	{
+		.compatible = "samsung,s5pv210-secss",
+		.data = &s5p_aes_data,
+	},
+	{
+		.compatible = "samsung,exynos4210-secss",
+		.data = &exynos_aes_data,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
+
+static inline struct samsung_aes_variant *find_s5p_sss_version
+				   (struct platform_device *pdev)
+{
+	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
+		const struct of_device_id *match;
+		match = of_match_node(s5p_sss_dt_match,
+					pdev->dev.of_node);
+		return (struct samsung_aes_variant *)match->data;
+	}
+	return (struct samsung_aes_variant *)
+			platform_get_device_id(pdev)->driver_data;
+}
+
 static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
 	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
@@ -272,8 +327,12 @@ static void s5p_aes_tx(struct s5p_aes_dev *dev)
 		}
 
 		s5p_set_dma_outdata(dev, dev->sg_dst);
-	} else
+	} else {
 		s5p_aes_complete(dev, err);
+
+		dev->busy = true;
+		tasklet_schedule(&dev->tasklet);
+	}
 }
 
 static void s5p_aes_rx(struct s5p_aes_dev *dev)
@@ -322,14 +381,15 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
 {
 	void __iomem *keystart;
 
-	memcpy(dev->ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+	if (iv)
+		memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
 
 	if (keylen == AES_KEYSIZE_256)
-		keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(0);
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
 	else if (keylen == AES_KEYSIZE_192)
-		keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(2);
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
 	else
-		keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(4);
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
 
 	memcpy(keystart, key, keylen);
 }
@@ -379,7 +439,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 	if (err)
 		goto outdata_error;
 
-	SSS_WRITE(dev, AES_CONTROL, aes_control);
+	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
 	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
 
 	s5p_set_dma_indata(dev, req->src);
@@ -410,10 +470,13 @@ static void s5p_tasklet_cb(unsigned long data)
 	spin_lock_irqsave(&dev->lock, flags);
 	backlog   = crypto_get_backlog(&dev->queue);
 	async_req = crypto_dequeue_request(&dev->queue);
-	spin_unlock_irqrestore(&dev->lock, flags);
 
-	if (!async_req)
+	if (!async_req) {
+		dev->busy = false;
+		spin_unlock_irqrestore(&dev->lock, flags);
 		return;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -432,14 +495,13 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
 	int err;
 
 	spin_lock_irqsave(&dev->lock, flags);
+	err = ablkcipher_enqueue_request(&dev->queue, req);
 	if (dev->busy) {
-		err = -EAGAIN;
 		spin_unlock_irqrestore(&dev->lock, flags);
 		goto exit;
 	}
 	dev->busy = true;
 
-	err = ablkcipher_enqueue_request(&dev->queue, req);
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	tasklet_schedule(&dev->tasklet);
@@ -564,6 +626,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
 	struct s5p_aes_dev *pdata;
 	struct device *dev = &pdev->dev;
 	struct resource *res;
+	struct samsung_aes_variant *variant;
 
 	if (s5p_dev)
 		return -EEXIST;
@@ -577,30 +640,25 @@ static int s5p_aes_probe(struct platform_device *pdev)
 	if (IS_ERR(pdata->ioaddr))
 		return PTR_ERR(pdata->ioaddr);
 
+	variant = find_s5p_sss_version(pdev);
+
 	pdata->clk = devm_clk_get(dev, "secss");
 	if (IS_ERR(pdata->clk)) {
 		dev_err(dev, "failed to find secss clock source\n");
 		return -ENOENT;
 	}
 
-	clk_enable(pdata->clk);
+	err = clk_prepare_enable(pdata->clk);
+	if (err < 0) {
+		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
+		return err;
+	}
 
 	spin_lock_init(&pdata->lock);
 
-	pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
-	if (pdata->irq_hash < 0) {
-		err = pdata->irq_hash;
-		dev_warn(dev, "hash interrupt is not available.\n");
-		goto err_irq;
-	}
-	err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
-			       IRQF_SHARED, pdev->name, pdev);
-	if (err < 0) {
-		dev_warn(dev, "hash interrupt is not available.\n");
-		goto err_irq;
-	}
+	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
 
-	pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
+	pdata->irq_fc = platform_get_irq(pdev, 0);
 	if (pdata->irq_fc < 0) {
 		err = pdata->irq_fc;
 		dev_warn(dev, "feed control interrupt is not available.\n");
@@ -613,6 +671,23 @@ static int s5p_aes_probe(struct platform_device *pdev)
 		goto err_irq;
 	}
 
+	if (variant->has_hash_irq) {
+		pdata->irq_hash = platform_get_irq(pdev, 1);
+		if (pdata->irq_hash < 0) {
+			err = pdata->irq_hash;
+			dev_warn(dev, "hash interrupt is not available.\n");
+			goto err_irq;
+		}
+		err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
+				       IRQF_SHARED, pdev->name, pdev);
+		if (err < 0) {
+			dev_warn(dev, "hash interrupt is not available.\n");
+			goto err_irq;
+		}
+	}
+
+	pdata->busy = false;
+	pdata->variant = variant;
 	pdata->dev = dev;
 	platform_set_drvdata(pdev, pdata);
 	s5p_dev = pdata;
@@ -639,7 +714,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
 	tasklet_kill(&pdata->tasklet);
 
  err_irq:
-	clk_disable(pdata->clk);
+	clk_disable_unprepare(pdata->clk);
 
 	s5p_dev = NULL;
 
@@ -659,7 +734,7 @@ static int s5p_aes_remove(struct platform_device *pdev)
 
 	tasklet_kill(&pdata->tasklet);
 
-	clk_disable(pdata->clk);
+	clk_disable_unprepare(pdata->clk);
 
 	s5p_dev = NULL;
 
@@ -672,6 +747,7 @@ static struct platform_driver s5p_aes_crypto = {
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "s5p-secss",
+		.of_match_table = s5p_sss_dt_match,
 	},
 };
 
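
The samsung_aes_variant lookup is the stock pattern for driving several SoC revisions from one platform driver: each OF match entry carries a pointer to per-SoC constants, and probe() resolves it before touching any registers. A minimal self-contained sketch of the same idea with hypothetical names (s5p-sss's real tables appear in the diff above):

#include <linux/of.h>
#include <linux/platform_device.h>

struct my_variant {
	unsigned int aes_offset;	/* per-SoC register block offset */
};

static const struct my_variant soc_a_data = { .aes_offset = 0x4000 };
static const struct my_variant soc_b_data = { .aes_offset = 0x200 };

static const struct of_device_id my_dt_match[] = {
	{ .compatible = "vendor,soc-a-secss", .data = &soc_a_data },
	{ .compatible = "vendor,soc-b-secss", .data = &soc_b_data },
	{ },
};

static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct my_variant *variant;

	match = of_match_node(my_dt_match, pdev->dev.of_node);
	if (!match)
		return -EINVAL;		/* not probed via DT */
	variant = match->data;

	/* ... map registers at base + variant->aes_offset ... */
	return 0;
}
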
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 07a5987ce67d..164e1ec624e3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -728,7 +728,7 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
 
 static int sahara_aes_cra_init(struct crypto_tfm *tfm)
 {
-	const char *name = tfm->__crt_alg->cra_name;
+	const char *name = crypto_tfm_alg_name(tfm);
 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	ctx->fallback = crypto_alloc_ablkcipher(name, 0,