author		Linus Torvalds <torvalds@linux-foundation.org>	2013-07-05 15:12:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-05 15:12:33 -0400
commit		b2c311075db578f1433d9b303698491bfa21279a (patch)
tree		41d5f1b5ad6f45be7211f524328de81f7e9754be /drivers/crypto
parent		45175476ae2dbebc860d5cf486f2916044343513 (diff)
parent		02c0241b600e4ab8a732c89749e252165145d60c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- Do not idle omap device between crypto operations in one session.
- Added sha224/sha384 shims for SSSE3.
- More optimisations for camellia-aesni-avx2.
- Removed defunct blowfish/twofish AVX2 implementations.
- Added unaligned buffer self-tests.
- Added PCLMULQDQ optimisation for CRCT10DIF.
- Added support for Freescale's DCP co-processor.
- Misc fixes.
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (44 commits)
crypto: testmgr - test hash implementations with unaligned buffers
crypto: testmgr - test AEADs with unaligned buffers
crypto: testmgr - test skciphers with unaligned buffers
crypto: testmgr - check that entries in alg_test_descs are in correct order
Revert "crypto: twofish - add AVX2/x86_64 assembler implementation of twofish cipher"
Revert "crypto: blowfish - add AVX2/x86_64 implementation of blowfish cipher"
crypto: camellia-aesni-avx2 - tune assembly code for more performance
hwrng: bcm2835 - fix MODULE_LICENSE tag
hwrng: nomadik - use clk_prepare_enable()
crypto: picoxcell - replace strict_strtoul() with kstrtoul()
crypto: dcp - Staticize local symbols
crypto: dcp - Use NULL instead of 0
crypto: dcp - Use devm_* APIs
crypto: dcp - Remove redundant platform_set_drvdata()
hwrng: use platform_{get,set}_drvdata()
crypto: omap-aes - Don't idle/start AES device between Encrypt operations
crypto: crct10dif - Use PTR_RET
crypto: ux500 - Cocci spatch "resource_size.spatch"
crypto: sha256_ssse3 - add sha224 support
crypto: sha512_ssse3 - add sha384 support
...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                 |  12
-rw-r--r--  drivers/crypto/Makefile                |   1
-rw-r--r--  drivers/crypto/caam/ctrl.c             |  10
-rw-r--r--  drivers/crypto/caam/desc.h             |  22
-rw-r--r--  drivers/crypto/caam/desc_constr.h      |  81
-rw-r--r--  drivers/crypto/caam/pdb.h              |   1
-rw-r--r--  drivers/crypto/caam/regs.h             |  42
-rw-r--r--  drivers/crypto/dcp.c                   | 912
-rw-r--r--  drivers/crypto/hifn_795x.c             |   4
-rw-r--r--  drivers/crypto/mv_cesa.c               |   1
-rw-r--r--  drivers/crypto/omap-aes.c              |  36
-rw-r--r--  drivers/crypto/omap-sham.c             |   7
-rw-r--r--  drivers/crypto/picoxcell_crypto.c      |   2
-rw-r--r--  drivers/crypto/s5p-sss.c               |   2
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c  |   2
15 files changed, 1099 insertions, 36 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dffb85525368..8ff7c230d82e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -278,7 +278,7 @@ config CRYPTO_DEV_PICOXCELL
 
 config CRYPTO_DEV_SAHARA
 	tristate "Support for SAHARA crypto accelerator"
-	depends on ARCH_MXC && EXPERIMENTAL && OF
+	depends on ARCH_MXC && OF
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES
 	select CRYPTO_ECB
@@ -286,6 +286,16 @@ config CRYPTO_DEV_SAHARA
 	  This option enables support for the SAHARA HW crypto accelerator
 	  found in some Freescale i.MX chips.
 
+config CRYPTO_DEV_DCP
+	tristate "Support for the DCP engine"
+	depends on ARCH_MXS && OF
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	help
+	  This options enables support for the hardware crypto-acceleration
+	  capabilities of the DCP co-processor
+
 config CRYPTO_DEV_S5P
 	tristate "Support for Samsung S5PV210 crypto accelerator"
 	depends on ARCH_S5PV210
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 38ce13d3b79b..b4946ddd2550 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_DCP) += dcp.o
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 6e94bcd94678..f5d6deced1cb 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -202,6 +202,7 @@ static int caam_probe(struct platform_device *pdev)
 #ifdef CONFIG_DEBUG_FS
 	struct caam_perfmon *perfmon;
 #endif
+	u64 cha_vid;
 
 	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
 	if (!ctrlpriv)
@@ -293,11 +294,14 @@ static int caam_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+
 	/*
-	 * RNG4 based SECs (v5+) need special initialization prior
-	 * to executing any descriptors
+	 * If SEC has RNG version >= 4 and RNG state handle has not been
+	 * already instantiated ,do RNG instantiation
 	 */
-	if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) {
+	if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
+	    !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
 		kick_trng(pdev);
 		ret = instantiate_rng(ctrlpriv->jrdev[0]);
 		if (ret) {
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index f7f833be8c67..53b296f78b0d 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -231,7 +231,12 @@ struct sec4_sg_entry {
 #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
 
 /* Offset in source/destination */
@@ -366,6 +371,7 @@ struct sec4_sg_entry {
 #define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
 #define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
 #define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO (0x0F << FIFOLD_TYPE_SHIFT)
 
 #define FIFOLDST_LEN_MASK 0xffff
 #define FIFOLDST_EXT_LEN_MASK 0xffffffff
@@ -1294,10 +1300,10 @@ struct sec4_sg_entry {
 #define SQOUT_SGF 0x01000000
 
 /* Appends to a previous pointer */
-#define SQOUT_PRE 0x00800000
+#define SQOUT_PRE SQIN_PRE
 
 /* Restore sequence with pointer/length */
-#define SQOUT_RTO 0x00200000
+#define SQOUT_RTO SQIN_RTO
 
 /* Use extended length following pointer */
 #define SQOUT_EXT 0x00400000
@@ -1359,6 +1365,7 @@ struct sec4_sg_entry {
 #define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
 #define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
 #define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO_NOINFO (0x0a << MOVE_DEST_SHIFT)
 #define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
 #define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
 #define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
@@ -1411,6 +1418,7 @@ struct sec4_sg_entry {
 #define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
@@ -1425,6 +1433,7 @@ struct sec4_sg_entry {
 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
@@ -1600,4 +1609,13 @@ struct sec4_sg_entry {
 #define NFIFOENTRY_PLEN_SHIFT 0
 #define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
 
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE 0x80000000
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN 0x40000000
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC 0x20000000
+
 #endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index c85c1f058401..fe3bfd1b08ca 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -110,6 +110,26 @@ static inline void append_cmd(u32 *desc, u32 command)
 	(*desc)++;
 }
 
+#define append_u32 append_cmd
+
+static inline void append_u64(u32 *desc, u64 data)
+{
+	u32 *offset = desc_end(desc);
+
+	*offset = upper_32_bits(data);
+	*(++offset) = lower_32_bits(data);
+
+	(*desc) += 2;
+}
+
+/* Write command without affecting header, and return pointer to next word */
+static inline u32 *write_cmd(u32 *desc, u32 command)
+{
+	*desc = command;
+
+	return desc + 1;
+}
+
 static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
 				  u32 command)
 {
@@ -122,7 +142,8 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
 					 unsigned int len, u32 command)
 {
 	append_cmd(desc, command);
-	append_ptr(desc, ptr);
+	if (!(command & (SQIN_RTO | SQIN_PRE)))
+		append_ptr(desc, ptr);
 	append_cmd(desc, len);
 }
 
@@ -176,17 +197,36 @@ static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
 }
 APPEND_CMD_PTR(key, KEY)
 APPEND_CMD_PTR(load, LOAD)
-APPEND_CMD_PTR(store, STORE)
 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
 
+static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
+				u32 options)
+{
+	u32 cmd_src;
+
+	cmd_src = options & LDST_SRCDST_MASK;
+
+	append_cmd(desc, CMD_STORE | options | len);
+
+	/* The following options do not require pointer */
+	if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE))
+		append_ptr(desc, ptr);
+}
+
 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
 static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
 						 unsigned int len, \
 						 u32 options) \
 { \
 	PRINT_POS; \
-	append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+	if (options & (SQIN_RTO | SQIN_PRE)) \
+		append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
+	else \
+		append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
 }
 APPEND_SEQ_PTR_INTLEN(in, IN)
 APPEND_SEQ_PTR_INTLEN(out, OUT)
@@ -259,7 +299,7 @@ APPEND_CMD_RAW_IMM(load, LOAD, u32);
  */
 #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
 append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
-	MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK));
+	MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len);
 
 #define append_math_add(desc, dest, src0, src1, len) \
 	APPEND_MATH(ADD, desc, dest, src0, src1, len)
@@ -279,6 +319,8 @@ append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
 	APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
 #define append_math_rshift(desc, dest, src0, src1, len) \
 	APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+#define append_math_ldshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(SHLD, desc, dest, src0, src1, len)
 
 /* Exactly one source is IMM. Data is passed in as u32 value */
 #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
@@ -305,3 +347,34 @@ do { \
 	APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
 #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
 	APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
+
+/* Exactly one source is IMM. Data is passed in as u64 value */
+#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
+do { \
+	u32 upper = (data >> 16) >> 16; \
+	APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
+		    (upper ? 0 : MATH_IFB)); \
+	if (upper) \
+		append_u64(desc, data); \
+	else \
+		append_u32(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
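As a rough illustration of how the new 64-bit immediate MATH helpers above are meant to be used, here is a sketch of descriptor construction. It is not code from this commit; it assumes the existing init_job_desc() helper and MAX_CAAM_DESCSIZE from the CAAM headers, plus the usual REG0/IMM operand tokens.

/*
 * Sketch only: build a job descriptor that adds 64-bit immediates to
 * MATH register 0.  APPEND_MATH_IMM_u64 sets MATH_IFB and emits one
 * immediate word when the upper 32 bits are zero, and otherwise emits
 * the full immediate as two words via append_u64().
 */
#include "desc_constr.h"

static void example_math_imm_u64(void)
{
	u32 desc[MAX_CAAM_DESCSIZE];

	init_job_desc(desc, 0);

	/* Upper 32 bits zero: MATH_IFB path, one immediate word. */
	append_math_add_imm_u64(desc, REG0, REG0, IMM, 0x1000ULL);

	/* Upper 32 bits set: two immediate words via append_u64(). */
	append_math_add_imm_u64(desc, REG0, REG0, IMM, 0x100000001ULL);
}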
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 62950d22ac13..3a87c0cf879a 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -44,6 +44,7 @@
 #define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
 #define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */
 #define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */
+#define PDBOPTS_ESP_AOFL 0x04 /* adjust out frame len (decap, SEC>=5.3)*/
 #define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */
 #define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */
 #define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index cd6fedad9935..c09142fc13e3 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -117,6 +117,43 @@ struct jr_outentry {
 #define CHA_NUM_DECONUM_SHIFT 56
 #define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
 
+/* CHA Version IDs */
+#define CHA_ID_AES_SHIFT 0
+#define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT)
+
+#define CHA_ID_DES_SHIFT 4
+#define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT)
+
+#define CHA_ID_ARC4_SHIFT 8
+#define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT)
+
+#define CHA_ID_MD_SHIFT 12
+#define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT)
+
+#define CHA_ID_RNG_SHIFT 16
+#define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT)
+
+#define CHA_ID_SNW8_SHIFT 20
+#define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT)
+
+#define CHA_ID_KAS_SHIFT 24
+#define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT)
+
+#define CHA_ID_PK_SHIFT 28
+#define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT)
+
+#define CHA_ID_CRC_SHIFT 32
+#define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT)
+
+#define CHA_ID_SNW9_SHIFT 36
+#define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT)
+
+#define CHA_ID_DECO_SHIFT 56
+#define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT)
+
+#define CHA_ID_JR_SHIFT 60
+#define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT)
+
 struct sec_vid {
 	u16 ip_id;
 	u8 maj_rev;
@@ -228,7 +265,10 @@ struct rng4tst {
 		u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
 		u32 rtfrqcnt; /* PRGM=0: freq. count register */
 	};
-	u32 rsvd1[56];
+	u32 rsvd1[40];
+#define RDSTA_IF0 0x00000001
+	u32 rdsta;
+	u32 rsvd2[15];
 };
 
 /*
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c new file mode 100644 index 000000000000..a8a7dd4b0d25 --- /dev/null +++ b/drivers/crypto/dcp.c | |||
@@ -0,0 +1,912 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for DCP cryptographic accelerator. | ||
5 | * | ||
6 | * Copyright (c) 2013 | ||
7 | * Author: Tobias Rauter <tobias.rauter@gmail.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * Based on tegra-aes.c, dcp.c (from freescale SDK) and sahara.c | ||
14 | */ | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/completion.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/crypto.h> | ||
28 | #include <linux/miscdevice.h> | ||
29 | |||
30 | #include <crypto/scatterwalk.h> | ||
31 | #include <crypto/aes.h> | ||
32 | |||
33 | |||
34 | /* IOCTL for DCP OTP Key AES - taken from Freescale's SDK*/ | ||
35 | #define DBS_IOCTL_BASE 'd' | ||
36 | #define DBS_ENC _IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16]) | ||
37 | #define DBS_DEC _IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16]) | ||
38 | |||
39 | /* DCP channel used for AES */ | ||
40 | #define USED_CHANNEL 1 | ||
41 | /* Ring Buffers' maximum size */ | ||
42 | #define DCP_MAX_PKG 20 | ||
43 | |||
44 | /* Control Register */ | ||
45 | #define DCP_REG_CTRL 0x000 | ||
46 | #define DCP_CTRL_SFRST (1<<31) | ||
47 | #define DCP_CTRL_CLKGATE (1<<30) | ||
48 | #define DCP_CTRL_CRYPTO_PRESENT (1<<29) | ||
49 | #define DCP_CTRL_SHA_PRESENT (1<<28) | ||
50 | #define DCP_CTRL_GATHER_RES_WRITE (1<<23) | ||
51 | #define DCP_CTRL_ENABLE_CONTEXT_CACHE (1<<22) | ||
52 | #define DCP_CTRL_ENABLE_CONTEXT_SWITCH (1<<21) | ||
53 | #define DCP_CTRL_CH_IRQ_E_0 0x01 | ||
54 | #define DCP_CTRL_CH_IRQ_E_1 0x02 | ||
55 | #define DCP_CTRL_CH_IRQ_E_2 0x04 | ||
56 | #define DCP_CTRL_CH_IRQ_E_3 0x08 | ||
57 | |||
58 | /* Status register */ | ||
59 | #define DCP_REG_STAT 0x010 | ||
60 | #define DCP_STAT_OTP_KEY_READY (1<<28) | ||
61 | #define DCP_STAT_CUR_CHANNEL(stat) ((stat>>24)&0x0F) | ||
62 | #define DCP_STAT_READY_CHANNEL(stat) ((stat>>16)&0x0F) | ||
63 | #define DCP_STAT_IRQ(stat) (stat&0x0F) | ||
64 | #define DCP_STAT_CHAN_0 (0x01) | ||
65 | #define DCP_STAT_CHAN_1 (0x02) | ||
66 | #define DCP_STAT_CHAN_2 (0x04) | ||
67 | #define DCP_STAT_CHAN_3 (0x08) | ||
68 | |||
69 | /* Channel Control Register */ | ||
70 | #define DCP_REG_CHAN_CTRL 0x020 | ||
71 | #define DCP_CHAN_CTRL_CH0_IRQ_MERGED (1<<16) | ||
72 | #define DCP_CHAN_CTRL_HIGH_PRIO_0 (0x0100) | ||
73 | #define DCP_CHAN_CTRL_HIGH_PRIO_1 (0x0200) | ||
74 | #define DCP_CHAN_CTRL_HIGH_PRIO_2 (0x0400) | ||
75 | #define DCP_CHAN_CTRL_HIGH_PRIO_3 (0x0800) | ||
76 | #define DCP_CHAN_CTRL_ENABLE_0 (0x01) | ||
77 | #define DCP_CHAN_CTRL_ENABLE_1 (0x02) | ||
78 | #define DCP_CHAN_CTRL_ENABLE_2 (0x04) | ||
79 | #define DCP_CHAN_CTRL_ENABLE_3 (0x08) | ||
80 | |||
81 | /* | ||
82 | * Channel Registers: | ||
83 | * The DCP has 4 channels. Each of this channels | ||
84 | * has 4 registers (command pointer, semaphore, status and options). | ||
85 | * The address of register REG of channel CHAN is obtained by | ||
86 | * dcp_chan_reg(REG, CHAN) | ||
87 | */ | ||
88 | #define DCP_REG_CHAN_PTR 0x00000100 | ||
89 | #define DCP_REG_CHAN_SEMA 0x00000110 | ||
90 | #define DCP_REG_CHAN_STAT 0x00000120 | ||
91 | #define DCP_REG_CHAN_OPT 0x00000130 | ||
92 | |||
93 | #define DCP_CHAN_STAT_NEXT_CHAIN_IS_0 0x010000 | ||
94 | #define DCP_CHAN_STAT_NO_CHAIN 0x020000 | ||
95 | #define DCP_CHAN_STAT_CONTEXT_ERROR 0x030000 | ||
96 | #define DCP_CHAN_STAT_PAYLOAD_ERROR 0x040000 | ||
97 | #define DCP_CHAN_STAT_INVALID_MODE 0x050000 | ||
98 | #define DCP_CHAN_STAT_PAGEFAULT 0x40 | ||
99 | #define DCP_CHAN_STAT_DST 0x20 | ||
100 | #define DCP_CHAN_STAT_SRC 0x10 | ||
101 | #define DCP_CHAN_STAT_PACKET 0x08 | ||
102 | #define DCP_CHAN_STAT_SETUP 0x04 | ||
103 | #define DCP_CHAN_STAT_MISMATCH 0x02 | ||
104 | |||
105 | /* hw packet control*/ | ||
106 | |||
107 | #define DCP_PKT_PAYLOAD_KEY (1<<11) | ||
108 | #define DCP_PKT_OTP_KEY (1<<10) | ||
109 | #define DCP_PKT_CIPHER_INIT (1<<9) | ||
110 | #define DCP_PKG_CIPHER_ENCRYPT (1<<8) | ||
111 | #define DCP_PKT_CIPHER_ENABLE (1<<5) | ||
112 | #define DCP_PKT_DECR_SEM (1<<1) | ||
113 | #define DCP_PKT_CHAIN (1<<2) | ||
114 | #define DCP_PKT_IRQ 1 | ||
115 | |||
116 | #define DCP_PKT_MODE_CBC (1<<4) | ||
117 | #define DCP_PKT_KEYSELECT_OTP (0xFF<<8) | ||
118 | |||
119 | /* cipher flags */ | ||
120 | #define DCP_ENC 0x0001 | ||
121 | #define DCP_DEC 0x0002 | ||
122 | #define DCP_ECB 0x0004 | ||
123 | #define DCP_CBC 0x0008 | ||
124 | #define DCP_CBC_INIT 0x0010 | ||
125 | #define DCP_NEW_KEY 0x0040 | ||
126 | #define DCP_OTP_KEY 0x0080 | ||
127 | #define DCP_AES 0x1000 | ||
128 | |||
129 | /* DCP Flags */ | ||
130 | #define DCP_FLAG_BUSY 0x01 | ||
131 | #define DCP_FLAG_PRODUCING 0x02 | ||
132 | |||
133 | /* clock defines */ | ||
134 | #define CLOCK_ON 1 | ||
135 | #define CLOCK_OFF 0 | ||
136 | |||
137 | struct dcp_dev_req_ctx { | ||
138 | int mode; | ||
139 | }; | ||
140 | |||
141 | struct dcp_op { | ||
142 | unsigned int flags; | ||
143 | u8 key[AES_KEYSIZE_128]; | ||
144 | int keylen; | ||
145 | |||
146 | struct ablkcipher_request *req; | ||
147 | struct crypto_ablkcipher *fallback; | ||
148 | |||
149 | uint32_t stat; | ||
150 | uint32_t pkt1; | ||
151 | uint32_t pkt2; | ||
152 | struct ablkcipher_walk walk; | ||
153 | }; | ||
154 | |||
155 | struct dcp_dev { | ||
156 | struct device *dev; | ||
157 | void __iomem *dcp_regs_base; | ||
158 | |||
159 | int dcp_vmi_irq; | ||
160 | int dcp_irq; | ||
161 | |||
162 | spinlock_t queue_lock; | ||
163 | struct crypto_queue queue; | ||
164 | |||
165 | uint32_t pkt_produced; | ||
166 | uint32_t pkt_consumed; | ||
167 | |||
168 | struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG]; | ||
169 | dma_addr_t hw_phys_pkg; | ||
170 | |||
171 | /* [KEY][IV] Both with 16 Bytes */ | ||
172 | u8 *payload_base; | ||
173 | dma_addr_t payload_base_dma; | ||
174 | |||
175 | |||
176 | struct tasklet_struct done_task; | ||
177 | struct tasklet_struct queue_task; | ||
178 | struct timer_list watchdog; | ||
179 | |||
180 | unsigned long flags; | ||
181 | |||
182 | struct dcp_op *ctx; | ||
183 | |||
184 | struct miscdevice dcp_bootstream_misc; | ||
185 | }; | ||
186 | |||
187 | struct dcp_hw_packet { | ||
188 | uint32_t next; | ||
189 | uint32_t pkt1; | ||
190 | uint32_t pkt2; | ||
191 | uint32_t src; | ||
192 | uint32_t dst; | ||
193 | uint32_t size; | ||
194 | uint32_t payload; | ||
195 | uint32_t stat; | ||
196 | }; | ||
197 | |||
198 | static struct dcp_dev *global_dev; | ||
199 | |||
200 | static inline u32 dcp_chan_reg(u32 reg, int chan) | ||
201 | { | ||
202 | return reg + (chan) * 0x40; | ||
203 | } | ||
204 | |||
205 | static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg) | ||
206 | { | ||
207 | writel(data, dev->dcp_regs_base + reg); | ||
208 | } | ||
209 | |||
210 | static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg) | ||
211 | { | ||
212 | writel(data, dev->dcp_regs_base + (reg | 0x04)); | ||
213 | } | ||
214 | |||
215 | static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg) | ||
216 | { | ||
217 | writel(data, dev->dcp_regs_base + (reg | 0x08)); | ||
218 | } | ||
219 | |||
220 | static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg) | ||
221 | { | ||
222 | writel(data, dev->dcp_regs_base + (reg | 0x0C)); | ||
223 | } | ||
224 | |||
225 | static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg) | ||
226 | { | ||
227 | return readl(dev->dcp_regs_base + reg); | ||
228 | } | ||
229 | |||
230 | static void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt) | ||
231 | { | ||
232 | dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE); | ||
233 | dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE); | ||
234 | dev_dbg(dev->dev, "unmap packet %x", (unsigned int) pkt); | ||
235 | } | ||
236 | |||
237 | static int dcp_dma_map(struct dcp_dev *dev, | ||
238 | struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt) | ||
239 | { | ||
240 | dev_dbg(dev->dev, "map packet %x", (unsigned int) pkt); | ||
241 | /* align to length = 16 */ | ||
242 | pkt->size = walk->nbytes - (walk->nbytes % 16); | ||
243 | |||
244 | pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset, | ||
245 | pkt->size, DMA_TO_DEVICE); | ||
246 | |||
247 | if (pkt->src == 0) { | ||
248 | dev_err(dev->dev, "Unable to map src"); | ||
249 | return -ENOMEM; | ||
250 | } | ||
251 | |||
252 | pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset, | ||
253 | pkt->size, DMA_FROM_DEVICE); | ||
254 | |||
255 | if (pkt->dst == 0) { | ||
256 | dev_err(dev->dev, "Unable to map dst"); | ||
257 | dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE); | ||
258 | return -ENOMEM; | ||
259 | } | ||
260 | |||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt, | ||
265 | uint8_t last) | ||
266 | { | ||
267 | struct dcp_op *ctx = dev->ctx; | ||
268 | pkt->pkt1 = ctx->pkt1; | ||
269 | pkt->pkt2 = ctx->pkt2; | ||
270 | |||
271 | pkt->payload = (u32) dev->payload_base_dma; | ||
272 | pkt->stat = 0; | ||
273 | |||
274 | if (ctx->flags & DCP_CBC_INIT) { | ||
275 | pkt->pkt1 |= DCP_PKT_CIPHER_INIT; | ||
276 | ctx->flags &= ~DCP_CBC_INIT; | ||
277 | } | ||
278 | |||
279 | mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500)); | ||
280 | pkt->pkt1 |= DCP_PKT_IRQ; | ||
281 | if (!last) | ||
282 | pkt->pkt1 |= DCP_PKT_CHAIN; | ||
283 | |||
284 | dev->pkt_produced++; | ||
285 | |||
286 | dcp_write(dev, 1, | ||
287 | dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL)); | ||
288 | } | ||
289 | |||
290 | static void dcp_op_proceed(struct dcp_dev *dev) | ||
291 | { | ||
292 | struct dcp_op *ctx = dev->ctx; | ||
293 | struct dcp_hw_packet *pkt; | ||
294 | |||
295 | while (ctx->walk.nbytes) { | ||
296 | int err = 0; | ||
297 | |||
298 | pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG]; | ||
299 | err = dcp_dma_map(dev, &ctx->walk, pkt); | ||
300 | if (err) { | ||
301 | dev->ctx->stat |= err; | ||
302 | /* start timer to wait for already set up calls */ | ||
303 | mod_timer(&dev->watchdog, | ||
304 | jiffies + msecs_to_jiffies(500)); | ||
305 | break; | ||
306 | } | ||
307 | |||
308 | |||
309 | err = ctx->walk.nbytes - pkt->size; | ||
310 | ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err); | ||
311 | |||
312 | dcp_op_one(dev, pkt, ctx->walk.nbytes == 0); | ||
313 | /* we have to wait if no space is left in buffer */ | ||
314 | if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG) | ||
315 | break; | ||
316 | } | ||
317 | clear_bit(DCP_FLAG_PRODUCING, &dev->flags); | ||
318 | } | ||
319 | |||
320 | static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk) | ||
321 | { | ||
322 | struct dcp_op *ctx = dev->ctx; | ||
323 | |||
324 | if (ctx->flags & DCP_NEW_KEY) { | ||
325 | memcpy(dev->payload_base, ctx->key, ctx->keylen); | ||
326 | ctx->flags &= ~DCP_NEW_KEY; | ||
327 | } | ||
328 | |||
329 | ctx->pkt1 = 0; | ||
330 | ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE; | ||
331 | ctx->pkt1 |= DCP_PKT_DECR_SEM; | ||
332 | |||
333 | if (ctx->flags & DCP_OTP_KEY) | ||
334 | ctx->pkt1 |= DCP_PKT_OTP_KEY; | ||
335 | else | ||
336 | ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY; | ||
337 | |||
338 | if (ctx->flags & DCP_ENC) | ||
339 | ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT; | ||
340 | |||
341 | ctx->pkt2 = 0; | ||
342 | if (ctx->flags & DCP_CBC) | ||
343 | ctx->pkt2 |= DCP_PKT_MODE_CBC; | ||
344 | |||
345 | dev->pkt_produced = 0; | ||
346 | dev->pkt_consumed = 0; | ||
347 | |||
348 | ctx->stat = 0; | ||
349 | dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL)); | ||
350 | dcp_write(dev, (u32) dev->hw_phys_pkg, | ||
351 | dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL)); | ||
352 | |||
353 | set_bit(DCP_FLAG_PRODUCING, &dev->flags); | ||
354 | |||
355 | if (use_walk) { | ||
356 | ablkcipher_walk_init(&ctx->walk, ctx->req->dst, | ||
357 | ctx->req->src, ctx->req->nbytes); | ||
358 | ablkcipher_walk_phys(ctx->req, &ctx->walk); | ||
359 | dcp_op_proceed(dev); | ||
360 | } else { | ||
361 | dcp_op_one(dev, dev->hw_pkg[0], 1); | ||
362 | clear_bit(DCP_FLAG_PRODUCING, &dev->flags); | ||
363 | } | ||
364 | } | ||
365 | |||
366 | static void dcp_done_task(unsigned long data) | ||
367 | { | ||
368 | struct dcp_dev *dev = (struct dcp_dev *)data; | ||
369 | struct dcp_hw_packet *last_packet; | ||
370 | int fin; | ||
371 | fin = 0; | ||
372 | |||
373 | for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG]; | ||
374 | last_packet->stat == 1; | ||
375 | last_packet = | ||
376 | dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) { | ||
377 | |||
378 | dcp_dma_unmap(dev, last_packet); | ||
379 | last_packet->stat = 0; | ||
380 | fin++; | ||
381 | } | ||
382 | /* the last call of this function already consumed this IRQ's packet */ | ||
383 | if (fin == 0) | ||
384 | return; | ||
385 | |||
386 | dev_dbg(dev->dev, | ||
387 | "Packet(s) done with status %x; finished: %d, produced:%d, complete consumed: %d", | ||
388 | dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed); | ||
389 | |||
390 | last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG]; | ||
391 | if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) { | ||
392 | if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags)) | ||
393 | dcp_op_proceed(dev); | ||
394 | return; | ||
395 | } | ||
396 | |||
397 | while (unlikely(dev->pkt_consumed < dev->pkt_produced)) { | ||
398 | dcp_dma_unmap(dev, | ||
399 | dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]); | ||
400 | } | ||
401 | |||
402 | if (dev->ctx->flags & DCP_OTP_KEY) { | ||
403 | /* we used the miscdevice, no walk to finish */ | ||
404 | clear_bit(DCP_FLAG_BUSY, &dev->flags); | ||
405 | return; | ||
406 | } | ||
407 | |||
408 | ablkcipher_walk_complete(&dev->ctx->walk); | ||
409 | dev->ctx->req->base.complete(&dev->ctx->req->base, | ||
410 | dev->ctx->stat); | ||
411 | dev->ctx->req = NULL; | ||
412 | /* in case there are other requests in the queue */ | ||
413 | tasklet_schedule(&dev->queue_task); | ||
414 | } | ||
415 | |||
416 | static void dcp_watchdog(unsigned long data) | ||
417 | { | ||
418 | struct dcp_dev *dev = (struct dcp_dev *)data; | ||
419 | dev->ctx->stat |= dcp_read(dev, | ||
420 | dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL)); | ||
421 | |||
422 | dev_err(dev->dev, "Timeout, Channel status: %x", dev->ctx->stat); | ||
423 | |||
424 | if (!dev->ctx->stat) | ||
425 | dev->ctx->stat = -ETIMEDOUT; | ||
426 | |||
427 | dcp_done_task(data); | ||
428 | } | ||
429 | |||
430 | |||
431 | static irqreturn_t dcp_common_irq(int irq, void *context) | ||
432 | { | ||
433 | u32 msk; | ||
434 | struct dcp_dev *dev = (struct dcp_dev *) context; | ||
435 | |||
436 | del_timer(&dev->watchdog); | ||
437 | |||
438 | msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT)); | ||
439 | dcp_clear(dev, msk, DCP_REG_STAT); | ||
440 | if (msk == 0) | ||
441 | return IRQ_NONE; | ||
442 | |||
443 | dev->ctx->stat |= dcp_read(dev, | ||
444 | dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL)); | ||
445 | |||
446 | if (msk & DCP_STAT_CHAN_1) | ||
447 | tasklet_schedule(&dev->done_task); | ||
448 | |||
449 | return IRQ_HANDLED; | ||
450 | } | ||
451 | |||
452 | static irqreturn_t dcp_vmi_irq(int irq, void *context) | ||
453 | { | ||
454 | return dcp_common_irq(irq, context); | ||
455 | } | ||
456 | |||
457 | static irqreturn_t dcp_irq(int irq, void *context) | ||
458 | { | ||
459 | return dcp_common_irq(irq, context); | ||
460 | } | ||
461 | |||
462 | static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx) | ||
463 | { | ||
464 | dev->ctx = ctx; | ||
465 | |||
466 | if ((ctx->flags & DCP_CBC) && ctx->req->info) { | ||
467 | ctx->flags |= DCP_CBC_INIT; | ||
468 | memcpy(dev->payload_base + AES_KEYSIZE_128, | ||
469 | ctx->req->info, AES_KEYSIZE_128); | ||
470 | } | ||
471 | |||
472 | dcp_op_start(dev, 1); | ||
473 | } | ||
474 | |||
475 | static void dcp_queue_task(unsigned long data) | ||
476 | { | ||
477 | struct dcp_dev *dev = (struct dcp_dev *) data; | ||
478 | struct crypto_async_request *async_req, *backlog; | ||
479 | struct crypto_ablkcipher *tfm; | ||
480 | struct dcp_op *ctx; | ||
481 | struct dcp_dev_req_ctx *rctx; | ||
482 | struct ablkcipher_request *req; | ||
483 | unsigned long flags; | ||
484 | |||
485 | spin_lock_irqsave(&dev->queue_lock, flags); | ||
486 | |||
487 | backlog = crypto_get_backlog(&dev->queue); | ||
488 | async_req = crypto_dequeue_request(&dev->queue); | ||
489 | |||
490 | spin_unlock_irqrestore(&dev->queue_lock, flags); | ||
491 | |||
492 | if (!async_req) | ||
493 | goto ret_nothing_done; | ||
494 | |||
495 | if (backlog) | ||
496 | backlog->complete(backlog, -EINPROGRESS); | ||
497 | |||
498 | req = ablkcipher_request_cast(async_req); | ||
499 | tfm = crypto_ablkcipher_reqtfm(req); | ||
500 | rctx = ablkcipher_request_ctx(req); | ||
501 | ctx = crypto_ablkcipher_ctx(tfm); | ||
502 | |||
503 | if (!req->src || !req->dst) | ||
504 | goto ret_nothing_done; | ||
505 | |||
506 | ctx->flags |= rctx->mode; | ||
507 | ctx->req = req; | ||
508 | |||
509 | dcp_crypt(dev, ctx); | ||
510 | |||
511 | return; | ||
512 | |||
513 | ret_nothing_done: | ||
514 | clear_bit(DCP_FLAG_BUSY, &dev->flags); | ||
515 | } | ||
516 | |||
517 | |||
518 | static int dcp_cra_init(struct crypto_tfm *tfm) | ||
519 | { | ||
520 | const char *name = tfm->__crt_alg->cra_name; | ||
521 | struct dcp_op *ctx = crypto_tfm_ctx(tfm); | ||
522 | |||
523 | tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx); | ||
524 | |||
525 | ctx->fallback = crypto_alloc_ablkcipher(name, 0, | ||
526 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | ||
527 | |||
528 | if (IS_ERR(ctx->fallback)) { | ||
529 | dev_err(global_dev->dev, "Error allocating fallback algo %s\n", | ||
530 | name); | ||
531 | return PTR_ERR(ctx->fallback); | ||
532 | } | ||
533 | |||
534 | return 0; | ||
535 | } | ||
536 | |||
537 | static void dcp_cra_exit(struct crypto_tfm *tfm) | ||
538 | { | ||
539 | struct dcp_op *ctx = crypto_tfm_ctx(tfm); | ||
540 | |||
541 | if (ctx->fallback) | ||
542 | crypto_free_ablkcipher(ctx->fallback); | ||
543 | |||
544 | ctx->fallback = NULL; | ||
545 | } | ||
546 | |||
547 | /* async interface */ | ||
548 | static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
549 | unsigned int len) | ||
550 | { | ||
551 | struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm); | ||
552 | unsigned int ret = 0; | ||
553 | ctx->keylen = len; | ||
554 | ctx->flags = 0; | ||
555 | if (len == AES_KEYSIZE_128) { | ||
556 | if (memcmp(ctx->key, key, AES_KEYSIZE_128)) { | ||
557 | memcpy(ctx->key, key, len); | ||
558 | ctx->flags |= DCP_NEW_KEY; | ||
559 | } | ||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | ||
564 | ctx->fallback->base.crt_flags |= | ||
565 | (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); | ||
566 | |||
567 | ret = crypto_ablkcipher_setkey(ctx->fallback, key, len); | ||
568 | if (ret) { | ||
569 | struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm); | ||
570 | |||
571 | tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK; | ||
572 | tfm_aux->crt_flags |= | ||
573 | (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK); | ||
574 | } | ||
575 | return ret; | ||
576 | } | ||
577 | |||
578 | static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode) | ||
579 | { | ||
580 | struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req); | ||
581 | struct dcp_dev *dev = global_dev; | ||
582 | unsigned long flags; | ||
583 | int err = 0; | ||
584 | |||
585 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) | ||
586 | return -EINVAL; | ||
587 | |||
588 | rctx->mode = mode; | ||
589 | |||
590 | spin_lock_irqsave(&dev->queue_lock, flags); | ||
591 | err = ablkcipher_enqueue_request(&dev->queue, req); | ||
592 | spin_unlock_irqrestore(&dev->queue_lock, flags); | ||
593 | |||
594 | flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags); | ||
595 | |||
596 | if (!(flags & DCP_FLAG_BUSY)) | ||
597 | tasklet_schedule(&dev->queue_task); | ||
598 | |||
599 | return err; | ||
600 | } | ||
601 | |||
602 | static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
603 | { | ||
604 | struct crypto_tfm *tfm = | ||
605 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
606 | struct dcp_op *ctx = crypto_ablkcipher_ctx( | ||
607 | crypto_ablkcipher_reqtfm(req)); | ||
608 | |||
609 | if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { | ||
610 | int err = 0; | ||
611 | ablkcipher_request_set_tfm(req, ctx->fallback); | ||
612 | err = crypto_ablkcipher_encrypt(req); | ||
613 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); | ||
614 | return err; | ||
615 | } | ||
616 | |||
617 | return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC); | ||
618 | } | ||
619 | |||
620 | static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
621 | { | ||
622 | struct crypto_tfm *tfm = | ||
623 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
624 | struct dcp_op *ctx = crypto_ablkcipher_ctx( | ||
625 | crypto_ablkcipher_reqtfm(req)); | ||
626 | |||
627 | if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { | ||
628 | int err = 0; | ||
629 | ablkcipher_request_set_tfm(req, ctx->fallback); | ||
630 | err = crypto_ablkcipher_decrypt(req); | ||
631 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); | ||
632 | return err; | ||
633 | } | ||
634 | return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC); | ||
635 | } | ||
636 | |||
637 | static struct crypto_alg algs[] = { | ||
638 | { | ||
639 | .cra_name = "cbc(aes)", | ||
640 | .cra_driver_name = "dcp-cbc-aes", | ||
641 | .cra_alignmask = 3, | ||
642 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | | ||
643 | CRYPTO_ALG_NEED_FALLBACK, | ||
644 | .cra_blocksize = AES_KEYSIZE_128, | ||
645 | .cra_type = &crypto_ablkcipher_type, | ||
646 | .cra_priority = 300, | ||
647 | .cra_u.ablkcipher = { | ||
648 | .min_keysize = AES_KEYSIZE_128, | ||
649 | .max_keysize = AES_KEYSIZE_128, | ||
650 | .setkey = dcp_aes_setkey, | ||
651 | .encrypt = dcp_aes_cbc_encrypt, | ||
652 | .decrypt = dcp_aes_cbc_decrypt, | ||
653 | .ivsize = AES_KEYSIZE_128, | ||
654 | } | ||
655 | |||
656 | }, | ||
657 | }; | ||
658 | |||
659 | /* DCP bootstream verification interface: uses OTP key for crypto */ | ||
660 | static int dcp_bootstream_open(struct inode *inode, struct file *file) | ||
661 | { | ||
662 | file->private_data = container_of((file->private_data), | ||
663 | struct dcp_dev, dcp_bootstream_misc); | ||
664 | return 0; | ||
665 | } | ||
666 | |||
667 | static long dcp_bootstream_ioctl(struct file *file, | ||
668 | unsigned int cmd, unsigned long arg) | ||
669 | { | ||
670 | struct dcp_dev *dev = (struct dcp_dev *) file->private_data; | ||
671 | void __user *argp = (void __user *)arg; | ||
672 | int ret; | ||
673 | |||
674 | if (dev == NULL) | ||
675 | return -EBADF; | ||
676 | |||
677 | if (cmd != DBS_ENC && cmd != DBS_DEC) | ||
678 | return -EINVAL; | ||
679 | |||
680 | if (copy_from_user(dev->payload_base, argp, 16)) | ||
681 | return -EFAULT; | ||
682 | |||
683 | if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags)) | ||
684 | return -EAGAIN; | ||
685 | |||
686 | dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL); | ||
687 | if (!dev->ctx) { | ||
688 | dev_err(dev->dev, | ||
689 | "cannot allocate context for OTP crypto"); | ||
690 | clear_bit(DCP_FLAG_BUSY, &dev->flags); | ||
691 | return -ENOMEM; | ||
692 | } | ||
693 | |||
694 | dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT; | ||
695 | dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC; | ||
696 | dev->hw_pkg[0]->src = dev->payload_base_dma; | ||
697 | dev->hw_pkg[0]->dst = dev->payload_base_dma; | ||
698 | dev->hw_pkg[0]->size = 16; | ||
699 | |||
700 | dcp_op_start(dev, 0); | ||
701 | |||
702 | while (test_bit(DCP_FLAG_BUSY, &dev->flags)) | ||
703 | cpu_relax(); | ||
704 | |||
705 | ret = dev->ctx->stat; | ||
706 | if (!ret && copy_to_user(argp, dev->payload_base, 16)) | ||
707 | ret = -EFAULT; | ||
708 | |||
709 | kfree(dev->ctx); | ||
710 | |||
711 | return ret; | ||
712 | } | ||
713 | |||
714 | static const struct file_operations dcp_bootstream_fops = { | ||
715 | .owner = THIS_MODULE, | ||
716 | .unlocked_ioctl = dcp_bootstream_ioctl, | ||
717 | .open = dcp_bootstream_open, | ||
718 | }; | ||
719 | |||
720 | static int dcp_probe(struct platform_device *pdev) | ||
721 | { | ||
722 | struct dcp_dev *dev = NULL; | ||
723 | struct resource *r; | ||
724 | int i, ret, j; | ||
725 | |||
726 | dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); | ||
727 | if (!dev) | ||
728 | return -ENOMEM; | ||
729 | |||
730 | global_dev = dev; | ||
731 | dev->dev = &pdev->dev; | ||
732 | |||
733 | platform_set_drvdata(pdev, dev); | ||
734 | |||
735 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
736 | if (!r) { | ||
737 | dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); | ||
738 | return -ENXIO; | ||
739 | } | ||
740 | dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start, | ||
741 | resource_size(r)); | ||
742 | |||
743 | dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); | ||
744 | udelay(10); | ||
745 | dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL); | ||
746 | |||
747 | dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE | | ||
748 | DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1, | ||
749 | DCP_REG_CTRL); | ||
750 | |||
751 | dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL); | ||
752 | |||
753 | for (i = 0; i < 4; i++) | ||
754 | dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i)); | ||
755 | |||
756 | dcp_clear(dev, -1, DCP_REG_STAT); | ||
757 | |||
758 | |||
759 | r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
760 | if (!r) { | ||
761 | dev_err(&pdev->dev, "can't get IRQ resource (0)\n"); | ||
762 | return -EIO; | ||
763 | } | ||
764 | dev->dcp_vmi_irq = r->start; | ||
765 | ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev); | ||
766 | if (ret != 0) { | ||
767 | dev_err(&pdev->dev, "can't request_irq (0)\n"); | ||
768 | return -EIO; | ||
769 | } | ||
770 | |||
771 | r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | ||
772 | if (!r) { | ||
773 | dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); | ||
774 | ret = -EIO; | ||
775 | goto err_free_irq0; | ||
776 | } | ||
777 | dev->dcp_irq = r->start; | ||
778 | ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev); | ||
779 | if (ret != 0) { | ||
780 | dev_err(&pdev->dev, "can't request_irq (1)\n"); | ||
781 | ret = -EIO; | ||
782 | goto err_free_irq0; | ||
783 | } | ||
784 | |||
785 | dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, | ||
786 | DCP_MAX_PKG * sizeof(struct dcp_hw_packet), | ||
787 | &dev->hw_phys_pkg, | ||
788 | GFP_KERNEL); | ||
789 | if (!dev->hw_pkg[0]) { | ||
790 | dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); | ||
791 | ret = -ENOMEM; | ||
792 | goto err_free_irq1; | ||
793 | } | ||
794 | |||
795 | for (i = 1; i < DCP_MAX_PKG; i++) { | ||
796 | dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg | ||
797 | + i * sizeof(struct dcp_hw_packet); | ||
798 | dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1; | ||
799 | } | ||
800 | dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg; | ||
801 | |||
802 | |||
803 | dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, | ||
804 | &dev->payload_base_dma, GFP_KERNEL); | ||
805 | if (!dev->payload_base) { | ||
806 | dev_err(&pdev->dev, "Could not allocate memory for key\n"); | ||
807 | ret = -ENOMEM; | ||
808 | goto err_free_hw_packet; | ||
809 | } | ||
810 | tasklet_init(&dev->queue_task, dcp_queue_task, | ||
811 | (unsigned long) dev); | ||
812 | tasklet_init(&dev->done_task, dcp_done_task, | ||
813 | (unsigned long) dev); | ||
814 | spin_lock_init(&dev->queue_lock); | ||
815 | |||
816 | crypto_init_queue(&dev->queue, 10); | ||
817 | |||
818 | init_timer(&dev->watchdog); | ||
819 | dev->watchdog.function = &dcp_watchdog; | ||
820 | dev->watchdog.data = (unsigned long)dev; | ||
821 | |||
822 | dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR, | ||
823 | dev->dcp_bootstream_misc.name = "dcpboot", | ||
824 | dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops, | ||
825 | ret = misc_register(&dev->dcp_bootstream_misc); | ||
826 | if (ret != 0) { | ||
827 | dev_err(dev->dev, "Unable to register misc device\n"); | ||
828 | goto err_free_key_iv; | ||
829 | } | ||
830 | |||
831 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | ||
832 | algs[i].cra_priority = 300; | ||
833 | algs[i].cra_ctxsize = sizeof(struct dcp_op); | ||
834 | algs[i].cra_module = THIS_MODULE; | ||
835 | algs[i].cra_init = dcp_cra_init; | ||
836 | algs[i].cra_exit = dcp_cra_exit; | ||
837 | if (crypto_register_alg(&algs[i])) { | ||
838 | dev_err(&pdev->dev, "register algorithm failed\n"); | ||
839 | ret = -ENOMEM; | ||
840 | goto err_unregister; | ||
841 | } | ||
842 | } | ||
843 | dev_notice(&pdev->dev, "DCP crypto enabled.!\n"); | ||
844 | |||
845 | return 0; | ||
846 | |||
847 | err_unregister: | ||
848 | for (j = 0; j < i; j++) | ||
849 | crypto_unregister_alg(&algs[j]); | ||
850 | err_free_key_iv: | ||
851 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, | ||
852 | dev->payload_base_dma); | ||
853 | err_free_hw_packet: | ||
854 | dma_free_coherent(&pdev->dev, DCP_MAX_PKG * | ||
855 | sizeof(struct dcp_hw_packet), dev->hw_pkg[0], | ||
856 | dev->hw_phys_pkg); | ||
857 | err_free_irq1: | ||
858 | free_irq(dev->dcp_irq, dev); | ||
859 | err_free_irq0: | ||
860 | free_irq(dev->dcp_vmi_irq, dev); | ||
861 | |||
862 | return ret; | ||
863 | } | ||
864 | |||
865 | static int dcp_remove(struct platform_device *pdev) | ||
866 | { | ||
867 | struct dcp_dev *dev; | ||
868 | int j; | ||
869 | dev = platform_get_drvdata(pdev); | ||
870 | |||
871 | dma_free_coherent(&pdev->dev, | ||
872 | DCP_MAX_PKG * sizeof(struct dcp_hw_packet), | ||
873 | dev->hw_pkg[0], dev->hw_phys_pkg); | ||
874 | |||
875 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, | ||
876 | dev->payload_base_dma); | ||
877 | |||
878 | free_irq(dev->dcp_irq, dev); | ||
879 | free_irq(dev->dcp_vmi_irq, dev); | ||
880 | |||
881 | tasklet_kill(&dev->done_task); | ||
882 | tasklet_kill(&dev->queue_task); | ||
883 | |||
884 | for (j = 0; j < ARRAY_SIZE(algs); j++) | ||
885 | crypto_unregister_alg(&algs[j]); | ||
886 | |||
887 | misc_deregister(&dev->dcp_bootstream_misc); | ||
888 | |||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | static struct of_device_id fs_dcp_of_match[] = { | ||
893 | { .compatible = "fsl-dcp"}, | ||
894 | {}, | ||
895 | }; | ||
896 | |||
897 | static struct platform_driver fs_dcp_driver = { | ||
898 | .probe = dcp_probe, | ||
899 | .remove = dcp_remove, | ||
900 | .driver = { | ||
901 | .name = "fsl-dcp", | ||
902 | .owner = THIS_MODULE, | ||
903 | .of_match_table = fs_dcp_of_match | ||
904 | } | ||
905 | }; | ||
906 | |||
907 | module_platform_driver(fs_dcp_driver); | ||
908 | |||
909 | |||
910 | MODULE_AUTHOR("Tobias Rauter <tobias.rauter@gmail.com>"); | ||
911 | MODULE_DESCRIPTION("Freescale DCP Crypto Driver"); | ||
912 | MODULE_LICENSE("GPL"); | ||
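The dcpboot misc device registered by this driver exposes the DCP's OTP-keyed AES to user space through the DBS_ENC/DBS_DEC ioctls, which transform a single 16-byte block in place. A minimal user-space sketch follows; it assumes the node appears as /dev/dcpboot, and the ioctl definitions are mirrored from the driver since no uapi header is installed.

/* Sketch only: one-block AES via the DCP bootstream interface. */
#include <fcntl.h>
#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Mirrored from drivers/crypto/dcp.c (not exported as a uapi header). */
#define DBS_IOCTL_BASE	'd'
#define DBS_ENC		_IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
#define DBS_DEC		_IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])

int main(void)
{
	uint8_t block[16] = { 0 };	/* one AES block, transformed in place */
	int fd = open("/dev/dcpboot", O_RDWR);	/* device path is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Driver copies 16 bytes in, runs AES with the OTP key, copies 16 bytes back.
	 * Non-zero return means failure (negative errno or DCP channel status). */
	if (ioctl(fd, DBS_ENC, block) != 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}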
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index ebf130e894b5..12fea3e22348 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c | |||
@@ -2676,7 +2676,7 @@ err_out_stop_device: | |||
2676 | hifn_reset_dma(dev, 1); | 2676 | hifn_reset_dma(dev, 1); |
2677 | hifn_stop_device(dev); | 2677 | hifn_stop_device(dev); |
2678 | err_out_free_irq: | 2678 | err_out_free_irq: |
2679 | free_irq(dev->irq, dev->name); | 2679 | free_irq(dev->irq, dev); |
2680 | tasklet_kill(&dev->tasklet); | 2680 | tasklet_kill(&dev->tasklet); |
2681 | err_out_free_desc: | 2681 | err_out_free_desc: |
2682 | pci_free_consistent(pdev, sizeof(struct hifn_dma), | 2682 | pci_free_consistent(pdev, sizeof(struct hifn_dma), |
@@ -2711,7 +2711,7 @@ static void hifn_remove(struct pci_dev *pdev) | |||
2711 | hifn_reset_dma(dev, 1); | 2711 | hifn_reset_dma(dev, 1); |
2712 | hifn_stop_device(dev); | 2712 | hifn_stop_device(dev); |
2713 | 2713 | ||
2714 | free_irq(dev->irq, dev->name); | 2714 | free_irq(dev->irq, dev); |
2715 | tasklet_kill(&dev->tasklet); | 2715 | tasklet_kill(&dev->tasklet); |
2716 | 2716 | ||
2717 | hifn_flush(dev); | 2717 | hifn_flush(dev); |
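Both hifn_795x hunks fix the dev_id cookie passed to free_irq(): it must be the same pointer that was handed to request_irq(), otherwise the kernel cannot match and remove the handler, which matters in particular on shared interrupt lines. The intended pairing, sketched with the handler name used by this driver (simplified and quoted from memory, not an exact excerpt):

    err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED,
                      dev->name, dev);          /* cookie is dev */
    if (err)
            goto err_out_free_desc;
    /* ... */
    free_irq(dev->irq, dev);                    /* same cookie, not dev->name */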
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index ce6290e5471a..3374a3ebe4c7 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1146,7 +1146,6 @@ err_unmap_reg: | |||
1146 | err: | 1146 | err: |
1147 | kfree(cp); | 1147 | kfree(cp); |
1148 | cpg = NULL; | 1148 | cpg = NULL; |
1149 | platform_set_drvdata(pdev, NULL); | ||
1150 | return ret; | 1149 | return ret; |
1151 | } | 1150 | } |
1152 | 1151 | ||
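The mv_cesa hunk drops an explicit platform_set_drvdata(pdev, NULL) from the probe error path (s5p-sss below receives the same treatment): the driver core now clears dev->driver_data itself when probe fails or the device is unbound, so the manual reset is redundant. What remains is the usual pairing, sketched here with hypothetical foo_* names rather than this driver's code:

    static int foo_probe(struct platform_device *pdev)
    {
            struct foo_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            platform_set_drvdata(pdev, priv);   /* core clears this on failure/unbind */
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            struct foo_priv *priv = platform_get_drvdata(pdev);

            /* tear down priv; no platform_set_drvdata(pdev, NULL) needed */
            return 0;
    }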
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ee15b0f7849a..5f7980586850 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -203,13 +203,6 @@ static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset, | |||
203 | 203 | ||
204 | static int omap_aes_hw_init(struct omap_aes_dev *dd) | 204 | static int omap_aes_hw_init(struct omap_aes_dev *dd) |
205 | { | 205 | { |
206 | /* | ||
207 | * clocks are enabled when request starts and disabled when finished. | ||
208 | * It may be long delays between requests. | ||
209 | * Device might go to off mode to save power. | ||
210 | */ | ||
211 | pm_runtime_get_sync(dd->dev); | ||
212 | |||
213 | if (!(dd->flags & FLAGS_INIT)) { | 206 | if (!(dd->flags & FLAGS_INIT)) { |
214 | dd->flags |= FLAGS_INIT; | 207 | dd->flags |= FLAGS_INIT; |
215 | dd->err = 0; | 208 | dd->err = 0; |
@@ -636,7 +629,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) | |||
636 | 629 | ||
637 | pr_debug("err: %d\n", err); | 630 | pr_debug("err: %d\n", err); |
638 | 631 | ||
639 | pm_runtime_put(dd->dev); | ||
640 | dd->flags &= ~FLAGS_BUSY; | 632 | dd->flags &= ~FLAGS_BUSY; |
641 | 633 | ||
642 | req->base.complete(&req->base, err); | 634 | req->base.complete(&req->base, err); |
@@ -837,8 +829,16 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req) | |||
837 | 829 | ||
838 | static int omap_aes_cra_init(struct crypto_tfm *tfm) | 830 | static int omap_aes_cra_init(struct crypto_tfm *tfm) |
839 | { | 831 | { |
840 | pr_debug("enter\n"); | 832 | struct omap_aes_dev *dd = NULL; |
833 | |||
834 | /* Find AES device, currently picks the first device */ | ||
835 | spin_lock_bh(&list_lock); | ||
836 | list_for_each_entry(dd, &dev_list, list) { | ||
837 | break; | ||
838 | } | ||
839 | spin_unlock_bh(&list_lock); | ||
841 | 840 | ||
841 | pm_runtime_get_sync(dd->dev); | ||
842 | tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); | 842 | tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); |
843 | 843 | ||
844 | return 0; | 844 | return 0; |
@@ -846,7 +846,16 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm) | |||
846 | 846 | ||
847 | static void omap_aes_cra_exit(struct crypto_tfm *tfm) | 847 | static void omap_aes_cra_exit(struct crypto_tfm *tfm) |
848 | { | 848 | { |
849 | pr_debug("enter\n"); | 849 | struct omap_aes_dev *dd = NULL; |
850 | |||
851 | /* Find AES device, currently picks the first device */ | ||
852 | spin_lock_bh(&list_lock); | ||
853 | list_for_each_entry(dd, &dev_list, list) { | ||
854 | break; | ||
855 | } | ||
856 | spin_unlock_bh(&list_lock); | ||
857 | |||
858 | pm_runtime_put_sync(dd->dev); | ||
850 | } | 859 | } |
851 | 860 | ||
852 | /* ********************** ALGS ************************************ */ | 861 | /* ********************** ALGS ************************************ */ |
@@ -1125,10 +1134,9 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
1125 | if (err) | 1134 | if (err) |
1126 | goto err_res; | 1135 | goto err_res; |
1127 | 1136 | ||
1128 | dd->io_base = devm_request_and_ioremap(dev, &res); | 1137 | dd->io_base = devm_ioremap_resource(dev, &res); |
1129 | if (!dd->io_base) { | 1138 | if (IS_ERR(dd->io_base)) { |
1130 | dev_err(dev, "can't ioremap\n"); | 1139 | err = PTR_ERR(dd->io_base); |
1131 | err = -ENOMEM; | ||
1132 | goto err_res; | 1140 | goto err_res; |
1133 | } | 1141 | } |
1134 | dd->phys_base = res.start; | 1142 | dd->phys_base = res.start; |
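The first four omap-aes hunks implement the "don't idle the AES device between Encrypt operations" item from the merge description: instead of taking and releasing a runtime-PM reference around every request, the driver now holds one for the lifetime of each transform. Roughly, with the dev_list lookup from the diff folded into a hypothetical omap_aes_find_dev() helper for brevity:

    static int omap_aes_cra_init(struct crypto_tfm *tfm)
    {
            struct omap_aes_dev *dd = omap_aes_find_dev();  /* hypothetical helper */

            pm_runtime_get_sync(dd->dev);   /* keep the module powered while the tfm lives */
            tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

            return 0;
    }

    static void omap_aes_cra_exit(struct crypto_tfm *tfm)
    {
            struct omap_aes_dev *dd = omap_aes_find_dev();

            pm_runtime_put_sync(dd->dev);   /* balances the get in cra_init */
    }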
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a1e1b4756ee5..4bb67652c200 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1686,10 +1686,9 @@ static int omap_sham_probe(struct platform_device *pdev) | |||
1686 | if (err) | 1686 | if (err) |
1687 | goto res_err; | 1687 | goto res_err; |
1688 | 1688 | ||
1689 | dd->io_base = devm_request_and_ioremap(dev, &res); | 1689 | dd->io_base = devm_ioremap_resource(dev, &res); |
1690 | if (!dd->io_base) { | 1690 | if (IS_ERR(dd->io_base)) { |
1691 | dev_err(dev, "can't ioremap\n"); | 1691 | err = PTR_ERR(dd->io_base); |
1692 | err = -ENOMEM; | ||
1693 | goto res_err; | 1692 | goto res_err; |
1694 | } | 1693 | } |
1695 | dd->phys_base = res.start; | 1694 | dd->phys_base = res.start; |
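omap-sham (like omap-aes above) converts devm_request_and_ioremap(), which signalled failure with a NULL return, to devm_ioremap_resource(), which returns an ERR_PTR-encoded errno and logs the failure itself, so the caller no longer needs its own dev_err(). The calling convention is simply (sketch of the pattern, not an exact excerpt):

    dd->io_base = devm_ioremap_resource(dev, &res);
    if (IS_ERR(dd->io_base)) {
            err = PTR_ERR(dd->io_base);     /* e.g. -EBUSY or -ENOMEM */
            goto res_err;
    }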
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index ac30724d923d..888f7f4a6d3f 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1298,7 +1298,7 @@ static ssize_t spacc_stat_irq_thresh_store(struct device *dev, | |||
1298 | struct spacc_engine *engine = spacc_dev_to_engine(dev); | 1298 | struct spacc_engine *engine = spacc_dev_to_engine(dev); |
1299 | unsigned long thresh; | 1299 | unsigned long thresh; |
1300 | 1300 | ||
1301 | if (strict_strtoul(buf, 0, &thresh)) | 1301 | if (kstrtoul(buf, 0, &thresh)) |
1302 | return -EINVAL; | 1302 | return -EINVAL; |
1303 | 1303 | ||
1304 | thresh = clamp(thresh, 1UL, engine->fifo_sz - 1); | 1304 | thresh = clamp(thresh, 1UL, engine->fifo_sz - 1); |
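The picoxcell change is part of the strict_strtoul() removal: kstrtoul() takes the same (string, base, result) arguments but returns 0 or a negative errno and is strict about trailing characters (a single trailing newline, as delivered by sysfs writes, is still accepted). A minimal sketch of the pattern in a sysfs store callback (the hunk above keeps returning -EINVAL; propagating the return value, as below, is also common):

    unsigned long thresh;
    int ret;

    ret = kstrtoul(buf, 0, &thresh);    /* base 0: decimal, hex (0x...) or octal (0...) */
    if (ret)
            return ret;                 /* -EINVAL or -ERANGE */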
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 4b314326f48a..cf149b19ff47 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -647,7 +647,6 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
647 | clk_disable(pdata->clk); | 647 | clk_disable(pdata->clk); |
648 | 648 | ||
649 | s5p_dev = NULL; | 649 | s5p_dev = NULL; |
650 | platform_set_drvdata(pdev, NULL); | ||
651 | 650 | ||
652 | return err; | 651 | return err; |
653 | } | 652 | } |
@@ -668,7 +667,6 @@ static int s5p_aes_remove(struct platform_device *pdev) | |||
668 | clk_disable(pdata->clk); | 667 | clk_disable(pdata->clk); |
669 | 668 | ||
670 | s5p_dev = NULL; | 669 | s5p_dev = NULL; |
671 | platform_set_drvdata(pdev, NULL); | ||
672 | 670 | ||
673 | return 0; | 671 | return 0; |
674 | } | 672 | } |
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 83d79b964d12..a999f537228f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1629,7 +1629,7 @@ static int ux500_cryp_remove(struct platform_device *pdev) | |||
1629 | 1629 | ||
1630 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1630 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1631 | if (res) | 1631 | if (res) |
1632 | release_mem_region(res->start, res->end - res->start + 1); | 1632 | release_mem_region(res->start, resource_size(res)); |
1633 | 1633 | ||
1634 | kfree(device_data); | 1634 | kfree(device_data); |
1635 | 1635 | ||
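The ux500 hunk comes from the "resource_size.spatch" Coccinelle cleanup mentioned in the merge description: the open-coded res->end - res->start + 1 is replaced by the resource_size() helper, which encapsulates the inclusive-end arithmetic and avoids the classic off-by-one. For reference, the helper is essentially the following (as defined in include/linux/ioport.h, quoted from memory):

    static inline resource_size_t resource_size(const struct resource *res)
    {
            return res->end - res->start + 1;
    }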