author	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-02 17:53:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-02 17:53:12 -0400
commit	797994f81a8b2bdca2eecffa415c1e7a89a4f961
tree	1383dc469c26ad37fdf960f682d9a48c782935c5 /drivers/crypto
parent	c8d8566952fda026966784a62f324c8352f77430
parent	3862de1f6c442d53bd828d39f86d07d933a70605
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- XTS mode optimisation for twofish/cast6/camellia/aes on x86
- AVX2/x86_64 implementation for blowfish/twofish/serpent/camellia
- SSSE3/AVX/AVX2 optimisations for sha256/sha512
- Added driver for SAHARA2 crypto accelerator
- Fix for GMAC when used in non-IPsec scenarios
- Added generic CMAC implementation (including IPsec glue); a usage sketch follows the commit list below
- IP update for crypto/atmel
- Support for more than one device in hwrng/timeriomem
- Added Broadcom BCM2835 RNG driver
- Misc fixes
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (59 commits)
crypto: caam - fix job ring cleanup code
crypto: camellia - add AVX2/AES-NI/x86_64 assembler implementation of camellia cipher
crypto: serpent - add AVX2/x86_64 assembler implementation of serpent cipher
crypto: twofish - add AVX2/x86_64 assembler implementation of twofish cipher
crypto: blowfish - add AVX2/x86_64 implementation of blowfish cipher
crypto: tcrypt - add async cipher speed tests for blowfish
crypto: testmgr - extend camellia test-vectors for camellia-aesni/avx2
crypto: aesni_intel - fix Kconfig problem with CRYPTO_GLUE_HELPER_X86
crypto: aesni_intel - add more optimized XTS mode for x86-64
crypto: x86/camellia-aesni-avx - add more optimized XTS code
crypto: cast6-avx: use new optimized XTS code
crypto: x86/twofish-avx - use optimized XTS code
crypto: x86 - add more optimized XTS-mode for serpent-avx
xfrm: add rfc4494 AES-CMAC-96 support
crypto: add CMAC support to CryptoAPI
crypto: testmgr - add empty test vectors for null ciphers
crypto: testmgr - add AES GMAC test vectors
crypto: gcm - fix rfc4543 to handle async crypto correctly
crypto: gcm - make GMAC work when dst and src are different
hwrng: timeriomem - added devicetree hooks
...
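
For context on the CMAC addition above, the sketch below shows how a kernel caller might request the new "cmac(aes)" transform through the synchronous hash (shash) interface. This is a hypothetical illustration, not code from this merge: the function name, error paths, and buffer handling are assumptions; only the standard crypto API calls (crypto_alloc_shash, crypto_shash_setkey, crypto_shash_digest) are relied on.

/* Hypothetical usage sketch -- not part of this merge. */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int cmac_aes_demo(const u8 *key, unsigned int keylen,
			 const u8 *msg, unsigned int len, u8 *mac)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	/* "cmac(aes)" resolves to the new CMAC template over AES. */
	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	/* The descriptor carries per-request state for the transform. */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	/* One-shot init + update + final; mac receives 16 bytes. */
	err = crypto_shash_digest(desc, msg, len, mac);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}
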
Diffstat (limited to 'drivers/crypto')
 drivers/crypto/Kconfig                 |   18
 drivers/crypto/Makefile                |    1
 drivers/crypto/atmel-aes.c             |  471
 drivers/crypto/atmel-sha-regs.h        |    7
 drivers/crypto/atmel-sha.c             |  586
 drivers/crypto/atmel-tdes-regs.h       |    2
 drivers/crypto/atmel-tdes.c            |  394
 drivers/crypto/bfin_crc.c              |    6
 drivers/crypto/caam/Kconfig            |    2
 drivers/crypto/caam/caamalg.c          |    6
 drivers/crypto/caam/caamhash.c         |    4
 drivers/crypto/caam/ctrl.c             |    3
 drivers/crypto/caam/error.c            |   10
 drivers/crypto/caam/intern.h           |    1
 drivers/crypto/caam/jr.c               |    4
 drivers/crypto/caam/key_gen.c          |    2
 drivers/crypto/caam/key_gen.h          |    2
 drivers/crypto/caam/regs.h             |    4
 drivers/crypto/omap-aes.c              |   15
 drivers/crypto/omap-sham.c             |   15
 drivers/crypto/picoxcell_crypto.c      |    4
 drivers/crypto/sahara.c                | 1070
 drivers/crypto/ux500/hash/hash_core.c  |    6
 23 files changed, 2312 insertions(+), 321 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 87ec4d027c25..dffb85525368 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -276,6 +276,16 @@ config CRYPTO_DEV_PICOXCELL
 
 	  Saying m here will build a module named pipcoxcell_crypto.
 
+config CRYPTO_DEV_SAHARA
+	tristate "Support for SAHARA crypto accelerator"
+	depends on ARCH_MXC && EXPERIMENTAL && OF
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_ECB
+	help
+	  This option enables support for the SAHARA HW crypto accelerator
+	  found in some Freescale i.MX chips.
+
 config CRYPTO_DEV_S5P
 	tristate "Support for Samsung S5PV210 crypto accelerator"
 	depends on ARCH_S5PV210
@@ -361,15 +371,17 @@ config CRYPTO_DEV_ATMEL_TDES
 	  will be called atmel-tdes.
 
 config CRYPTO_DEV_ATMEL_SHA
-	tristate "Support for Atmel SHA1/SHA256 hw accelerator"
+	tristate "Support for Atmel SHA hw accelerator"
 	depends on ARCH_AT91
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
+	select CRYPTO_SHA512
 	select CRYPTO_ALGAPI
 	help
-	  Some Atmel processors have SHA1/SHA256 hw accelerator.
+	  Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512
+	  hw accelerator.
 	  Select this if you want to use the Atmel module for
-	  SHA1/SHA256 algorithms.
+	  SHA1/SHA224/SHA256/SHA384/SHA512 algorithms.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called atmel-sha.
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 880a47b0b023..38ce13d3b79b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6f22ba51f969..c1efd910d97b 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -38,7 +38,7 @@
 #include <crypto/aes.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
-#include <linux/platform_data/atmel-aes.h>
+#include <linux/platform_data/crypto-atmel.h>
 #include "atmel-aes-regs.h"
 
 #define CFB8_BLOCK_SIZE		1
@@ -47,7 +47,7 @@
 #define CFB64_BLOCK_SIZE	8
 
 /* AES flags */
-#define AES_FLAGS_MODE_MASK	0x01ff
+#define AES_FLAGS_MODE_MASK	0x03ff
 #define AES_FLAGS_ENCRYPT	BIT(0)
 #define AES_FLAGS_CBC		BIT(1)
 #define AES_FLAGS_CFB		BIT(2)
@@ -55,21 +55,26 @@
 #define AES_FLAGS_CFB16		BIT(4)
 #define AES_FLAGS_CFB32		BIT(5)
 #define AES_FLAGS_CFB64		BIT(6)
-#define AES_FLAGS_OFB		BIT(7)
-#define AES_FLAGS_CTR		BIT(8)
+#define AES_FLAGS_CFB128	BIT(7)
+#define AES_FLAGS_OFB		BIT(8)
+#define AES_FLAGS_CTR		BIT(9)
 
 #define AES_FLAGS_INIT		BIT(16)
 #define AES_FLAGS_DMA		BIT(17)
 #define AES_FLAGS_BUSY		BIT(18)
+#define AES_FLAGS_FAST		BIT(19)
 
-#define AES_FLAGS_DUALBUFF	BIT(24)
-
-#define ATMEL_AES_QUEUE_LENGTH	1
-#define ATMEL_AES_CACHE_SIZE	0
+#define ATMEL_AES_QUEUE_LENGTH	50
 
 #define ATMEL_AES_DMA_THRESHOLD	16
 
 
+struct atmel_aes_caps {
+	bool	has_dualbuff;
+	bool	has_cfb64;
+	u32	max_burst_size;
+};
+
 struct atmel_aes_dev;
 
 struct atmel_aes_ctx {
@@ -77,6 +82,8 @@ struct atmel_aes_ctx {
 
 	int		keylen;
 	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
+
+	u16		block_size;
 };
 
 struct atmel_aes_reqctx {
@@ -112,20 +119,27 @@ struct atmel_aes_dev {
 
 	struct scatterlist	*in_sg;
 	unsigned int		nb_in_sg;
-
+	size_t			in_offset;
 	struct scatterlist	*out_sg;
 	unsigned int		nb_out_sg;
+	size_t			out_offset;
 
 	size_t	bufcnt;
+	size_t	buflen;
+	size_t	dma_size;
 
-	u8	buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
+	void	*buf_in;
 	int	dma_in;
+	dma_addr_t	dma_addr_in;
 	struct atmel_aes_dma	dma_lch_in;
 
-	u8	buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
+	void	*buf_out;
 	int	dma_out;
+	dma_addr_t	dma_addr_out;
 	struct atmel_aes_dma	dma_lch_out;
 
+	struct atmel_aes_caps	caps;
+
 	u32	hw_version;
 };
 
@@ -165,6 +179,37 @@ static int atmel_aes_sg_length(struct ablkcipher_request *req,
 	return sg_nb;
 }
 
+static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
+			void *buf, size_t buflen, size_t total, int out)
+{
+	unsigned int count, off = 0;
+
+	while (buflen && total) {
+		count = min((*sg)->length - *offset, total);
+		count = min(count, buflen);
+
+		if (!count)
+			return off;
+
+		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+
+		off += count;
+		buflen -= count;
+		*offset += count;
+		total -= count;
+
+		if (*offset == (*sg)->length) {
+			*sg = sg_next(*sg);
+			if (*sg)
+				*offset = 0;
+			else
+				total = 0;
+		}
+	}
+
+	return off;
+}
+
 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
 {
 	return readl_relaxed(dd->io_base + offset);
@@ -190,14 +235,6 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
 		atmel_aes_write(dd, offset, *value);
 }
 
-static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
-{
-	atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
-
-	if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
-		dd->flags |= AES_FLAGS_DUALBUFF;
-}
-
 static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
 {
 	struct atmel_aes_dev *aes_dd = NULL;
@@ -225,7 +262,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 
 	if (!(dd->flags & AES_FLAGS_INIT)) {
 		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
-		atmel_aes_dualbuff_test(dd);
+		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
 		dd->flags |= AES_FLAGS_INIT;
 		dd->err = 0;
 	}
@@ -233,11 +270,19 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 	return 0;
 }
 
+static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
+{
+	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
+}
+
 static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 {
 	atmel_aes_hw_init(dd);
 
-	dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
+	dd->hw_version = atmel_aes_get_version(dd);
+
+	dev_info(dd->dev,
+			"version: 0x%x\n", dd->hw_version);
 
 	clk_disable_unprepare(dd->iclk);
 }
@@ -260,50 +305,77 @@ static void atmel_aes_dma_callback(void *data)
 	tasklet_schedule(&dd->done_task);
 }
 
-static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
+static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
+		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
 {
+	struct scatterlist sg[2];
 	struct dma_async_tx_descriptor	*in_desc, *out_desc;
-	int nb_dma_sg_in, nb_dma_sg_out;
 
-	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
-	if (!dd->nb_in_sg)
-		goto exit_err;
+	dd->dma_size = length;
 
-	nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
-			DMA_TO_DEVICE);
-	if (!nb_dma_sg_in)
-		goto exit_err;
+	if (!(dd->flags & AES_FLAGS_FAST)) {
+		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+					   DMA_TO_DEVICE);
+	}
 
-	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
-				nb_dma_sg_in, DMA_MEM_TO_DEV,
-				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (dd->flags & AES_FLAGS_CFB8) {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_1_BYTE;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_1_BYTE;
+	} else if (dd->flags & AES_FLAGS_CFB16) {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_2_BYTES;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_2_BYTES;
+	} else {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+	}
 
-	if (!in_desc)
-		goto unmap_in;
+	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
+			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
+		dd->dma_lch_in.dma_conf.src_maxburst = 1;
+		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_out.dma_conf.src_maxburst = 1;
+		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+	} else {
+		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+	}
 
-	/* callback not needed */
+	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
 
-	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
-	if (!dd->nb_out_sg)
-		goto unmap_in;
+	dd->flags |= AES_FLAGS_DMA;
 
-	nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
-			DMA_FROM_DEVICE);
-	if (!nb_dma_sg_out)
-		goto unmap_out;
+	sg_init_table(&sg[0], 1);
+	sg_dma_address(&sg[0]) = dma_addr_in;
+	sg_dma_len(&sg[0]) = length;
 
-	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
-				nb_dma_sg_out, DMA_DEV_TO_MEM,
-				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	sg_init_table(&sg[1], 1);
+	sg_dma_address(&sg[1]) = dma_addr_out;
+	sg_dma_len(&sg[1]) = length;
+
+	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+				1, DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!in_desc)
+		return -EINVAL;
 
+	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+				1, DMA_DEV_TO_MEM,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!out_desc)
-		goto unmap_out;
+		return -EINVAL;
 
 	out_desc->callback = atmel_aes_dma_callback;
 	out_desc->callback_param = dd;
 
-	dd->total -= dd->req->nbytes;
-
 	dmaengine_submit(out_desc);
 	dma_async_issue_pending(dd->dma_lch_out.chan);
 
@@ -311,15 +383,6 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
 	dma_async_issue_pending(dd->dma_lch_in.chan);
 
 	return 0;
-
-unmap_out:
-	dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
-		DMA_FROM_DEVICE);
-unmap_in:
-	dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
-		DMA_TO_DEVICE);
-exit_err:
-	return -EINVAL;
 }
 
 static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
@@ -352,30 +415,66 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
 
 static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
 {
-	int err;
+	int err, fast = 0, in, out;
+	size_t count;
+	dma_addr_t addr_in, addr_out;
+
+	if ((!dd->in_offset) && (!dd->out_offset)) {
+		/* check for alignment */
+		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
+		fast = in && out;
+
+		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+			fast = 0;
+	}
+
+
+	if (fast) {
+		count = min(dd->total, sg_dma_len(dd->in_sg));
+		count = min(count, sg_dma_len(dd->out_sg));
+
+		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, dd->out_sg, 1,
+				DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			dma_unmap_sg(dd->dev, dd->in_sg, 1,
+				DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+
+		addr_in = sg_dma_address(dd->in_sg);
+		addr_out = sg_dma_address(dd->out_sg);
+
+		dd->flags |= AES_FLAGS_FAST;
 
-	if (dd->flags & AES_FLAGS_CFB8) {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_1_BYTE;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_1_BYTE;
-	} else if (dd->flags & AES_FLAGS_CFB16) {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_2_BYTES;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_2_BYTES;
 	} else {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		/* use cache buffers */
+		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
+				dd->buf_in, dd->buflen, dd->total, 0);
+
+		addr_in = dd->dma_addr_in;
+		addr_out = dd->dma_addr_out;
+
+		dd->flags &= ~AES_FLAGS_FAST;
 	}
 
-	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
-	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+	dd->total -= count;
 
-	dd->flags |= AES_FLAGS_DMA;
-	err = atmel_aes_crypt_dma(dd);
+	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
+
+	if (err && (dd->flags & AES_FLAGS_FAST)) {
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+	}
 
 	return err;
 }
@@ -410,6 +509,8 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
 			valmr |= AES_MR_CFBS_32b;
 		else if (dd->flags & AES_FLAGS_CFB64)
 			valmr |= AES_MR_CFBS_64b;
+		else if (dd->flags & AES_FLAGS_CFB128)
+			valmr |= AES_MR_CFBS_128b;
 	} else if (dd->flags & AES_FLAGS_OFB) {
 		valmr |= AES_MR_OPMOD_OFB;
 	} else if (dd->flags & AES_FLAGS_CTR) {
@@ -423,7 +524,7 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
 
 	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
 		valmr |= AES_MR_SMOD_IDATAR0;
-		if (dd->flags & AES_FLAGS_DUALBUFF)
+		if (dd->caps.has_dualbuff)
 			valmr |= AES_MR_DUALBUFF;
 	} else {
 		valmr |= AES_MR_SMOD_AUTO;
@@ -477,7 +578,9 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 	/* assign new request to device */
 	dd->req = req;
 	dd->total = req->nbytes;
+	dd->in_offset = 0;
 	dd->in_sg = req->src;
+	dd->out_offset = 0;
 	dd->out_sg = req->dst;
 
 	rctx = ablkcipher_request_ctx(req);
@@ -506,18 +609,86 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
 {
 	int err = -EINVAL;
+	size_t count;
 
 	if (dd->flags & AES_FLAGS_DMA) {
-		dma_unmap_sg(dd->dev, dd->out_sg,
-			dd->nb_out_sg, DMA_FROM_DEVICE);
-		dma_unmap_sg(dd->dev, dd->in_sg,
-			dd->nb_in_sg, DMA_TO_DEVICE);
 		err = 0;
+		if (dd->flags & AES_FLAGS_FAST) {
+			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		} else {
+			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+				dd->dma_size, DMA_FROM_DEVICE);
+
+			/* copy data */
+			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
+				dd->buf_out, dd->buflen, dd->dma_size, 1);
+			if (count != dd->dma_size) {
+				err = -EINVAL;
+				pr_err("not all data converted: %u\n", count);
+			}
+		}
 	}
 
 	return err;
 }
 
+
+static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+{
+	int err = -ENOMEM;
+
+	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
+	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
+	dd->buflen = PAGE_SIZE;
+	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+	if (!dd->buf_in || !dd->buf_out) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		goto err_alloc;
+	}
+
+	/* MAP here */
+	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
+					dd->buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_in;
+	}
+
+	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
+					dd->buflen, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_out;
+	}
+
+	return 0;
+
+err_map_out:
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+		DMA_TO_DEVICE);
+err_map_in:
+	free_page((unsigned long)dd->buf_out);
+	free_page((unsigned long)dd->buf_in);
+err_alloc:
+	if (err)
+		pr_err("error: %d\n", err);
+	return err;
+}
+
+static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+{
+	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+		DMA_FROM_DEVICE);
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+		DMA_TO_DEVICE);
+	free_page((unsigned long)dd->buf_out);
+	free_page((unsigned long)dd->buf_in);
+}
+
 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -525,9 +696,30 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct atmel_aes_dev *dd;
 
-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-		pr_err("request size is not exact amount of AES blocks\n");
-		return -EINVAL;
+	if (mode & AES_FLAGS_CFB8) {
+		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB8 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB8_BLOCK_SIZE;
+	} else if (mode & AES_FLAGS_CFB16) {
+		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB16 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB16_BLOCK_SIZE;
+	} else if (mode & AES_FLAGS_CFB32) {
+		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB32 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB32_BLOCK_SIZE;
+	} else {
+		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of AES blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = AES_BLOCK_SIZE;
 	}
 
 	dd = atmel_aes_find_dev(ctx);
@@ -551,14 +743,12 @@ static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
 	}
 }
 
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
+		struct crypto_platform_data *pdata)
 {
 	int err = -ENOMEM;
-	struct aes_platform_data	*pdata;
 	dma_cap_mask_t mask_in, mask_out;
 
-	pdata = dd->dev->platform_data;
-
 	if (pdata && pdata->dma_slave->txdata.dma_dev &&
 	    pdata->dma_slave->rxdata.dma_dev) {
 
@@ -568,28 +758,38 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
 
 		dd->dma_lch_in.chan = dma_request_channel(mask_in,
 				atmel_aes_filter, &pdata->dma_slave->rxdata);
+
 		if (!dd->dma_lch_in.chan)
 			goto err_dma_in;
 
 		dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
 		dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
 			AES_IDATAR(0);
-		dd->dma_lch_in.dma_conf.src_maxburst = 1;
-		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_in.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
 		dd->dma_lch_in.dma_conf.device_fc = false;
 
 		dma_cap_zero(mask_out);
 		dma_cap_set(DMA_SLAVE, mask_out);
 		dd->dma_lch_out.chan = dma_request_channel(mask_out,
 				atmel_aes_filter, &pdata->dma_slave->txdata);
+
 		if (!dd->dma_lch_out.chan)
 			goto err_dma_out;
 
 		dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
 		dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
 			AES_ODATAR(0);
-		dd->dma_lch_out.dma_conf.src_maxburst = 1;
-		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_out.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
 		dd->dma_lch_out.dma_conf.device_fc = false;
 
 		return 0;
@@ -665,13 +865,13 @@ static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
 {
 	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
+		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
 }
 
 static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
 {
 	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB);
+		AES_FLAGS_CFB | AES_FLAGS_CFB128);
 }
 
 static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
@@ -753,7 +953,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
-	.cra_alignmask		= 0x0,
+	.cra_alignmask		= 0xf,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
@@ -773,7 +973,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
-	.cra_alignmask		= 0x0,
+	.cra_alignmask		= 0xf,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
@@ -794,7 +994,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
-	.cra_alignmask		= 0x0,
+	.cra_alignmask		= 0xf,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
@@ -815,7 +1015,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
-	.cra_alignmask		= 0x0,
+	.cra_alignmask		= 0xf,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
@@ -836,7 +1036,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= CFB32_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
-	.cra_alignmask		= 0x0,
+	.cra_alignmask		= 0x3,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
@@ -857,7 +1057,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= CFB16_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
-	.cra_alignmask		= 0x0,
+	.cra_alignmask		= 0x1,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
@@ -899,7 +1099,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
-	.cra_alignmask		= 0x0,
+	.cra_alignmask		= 0xf,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
@@ -915,15 +1115,14 @@ static struct crypto_alg aes_algs[] = {
 	},
 };
 
-static struct crypto_alg aes_cfb64_alg[] = {
-{
+static struct crypto_alg aes_cfb64_alg = {
 	.cra_name		= "cfb64(aes)",
 	.cra_driver_name	= "atmel-cfb64-aes",
 	.cra_priority		= 100,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= CFB64_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
-	.cra_alignmask		= 0x0,
+	.cra_alignmask		= 0x7,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
@@ -936,7 +1135,6 @@ static struct crypto_alg aes_cfb64_alg = {
 		.encrypt	= atmel_aes_cfb64_encrypt,
 		.decrypt	= atmel_aes_cfb64_decrypt,
 	}
-},
 };
 
 static void atmel_aes_queue_task(unsigned long data)
@@ -969,7 +1167,14 @@ static void atmel_aes_done_task(unsigned long data)
 	err = dd->err ? : err;
 
 	if (dd->total && !err) {
-		err = atmel_aes_crypt_dma_start(dd);
+		if (dd->flags & AES_FLAGS_FAST) {
+			dd->in_sg = sg_next(dd->in_sg);
+			dd->out_sg = sg_next(dd->out_sg);
+			if (!dd->in_sg || !dd->out_sg)
+				err = -EINVAL;
+		}
+		if (!err)
+			err = atmel_aes_crypt_dma_start(dd);
 		if (!err)
 			return; /* DMA started. Not fininishing. */
 	}
@@ -1003,8 +1208,8 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 
 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
 		crypto_unregister_alg(&aes_algs[i]);
-	if (dd->hw_version >= 0x130)
-		crypto_unregister_alg(&aes_cfb64_alg[0]);
+	if (dd->caps.has_cfb64)
+		crypto_unregister_alg(&aes_cfb64_alg);
 }
 
 static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -1017,10 +1222,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
 			goto err_aes_algs;
 	}
 
-	atmel_aes_hw_version_init(dd);
-
-	if (dd->hw_version >= 0x130) {
-		err = crypto_register_alg(&aes_cfb64_alg[0]);
+	if (dd->caps.has_cfb64) {
+		err = crypto_register_alg(&aes_cfb64_alg);
 		if (err)
 			goto err_aes_cfb64_alg;
 	}
@@ -1036,10 +1239,32 @@ err_aes_algs:
 	return err;
 }
 
+static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
+{
+	dd->caps.has_dualbuff = 0;
+	dd->caps.has_cfb64 = 0;
+	dd->caps.max_burst_size = 1;
+
+	/* keep only major version number */
+	switch (dd->hw_version & 0xff0) {
+	case 0x130:
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_cfb64 = 1;
+		dd->caps.max_burst_size = 4;
+		break;
+	case 0x120:
+		break;
+	default:
+		dev_warn(dd->dev,
+				"Unmanaged aes version, set minimum capabilities\n");
+		break;
+	}
+}
+
 static int atmel_aes_probe(struct platform_device *pdev)
 {
 	struct atmel_aes_dev *aes_dd;
-	struct aes_platform_data	*pdata;
+	struct crypto_platform_data	*pdata;
 	struct device *dev = &pdev->dev;
 	struct resource *aes_res;
 	unsigned long aes_phys_size;
@@ -1099,7 +1324,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
 	}
 
 	/* Initializing the clock */
-	aes_dd->iclk = clk_get(&pdev->dev, NULL);
+	aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
 	if (IS_ERR(aes_dd->iclk)) {
 		dev_err(dev, "clock intialization failed.\n");
 		err = PTR_ERR(aes_dd->iclk);
@@ -1113,7 +1338,15 @@ static int atmel_aes_probe(struct platform_device *pdev)
 		goto aes_io_err;
 	}
 
-	err = atmel_aes_dma_init(aes_dd);
+	atmel_aes_hw_version_init(aes_dd);
+
+	atmel_aes_get_cap(aes_dd);
+
+	err = atmel_aes_buff_init(aes_dd);
+	if (err)
+		goto err_aes_buff;
+
+	err = atmel_aes_dma_init(aes_dd, pdata);
 	if (err)
 		goto err_aes_dma;
 
@@ -1135,6 +1368,8 @@ err_algs:
 	spin_unlock(&atmel_aes.lock);
 	atmel_aes_dma_cleanup(aes_dd);
 err_aes_dma:
+	atmel_aes_buff_cleanup(aes_dd);
+err_aes_buff:
 	iounmap(aes_dd->io_base);
 aes_io_err:
 	clk_put(aes_dd->iclk);
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index dc53a20d7da1..83b2d7425666 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -14,10 +14,13 @@
 #define SHA_MR_MODE_MANUAL	0x0
 #define SHA_MR_MODE_AUTO	0x1
 #define SHA_MR_MODE_PDC		0x2
-#define SHA_MR_DUALBUFF		(1 << 3)
 #define SHA_MR_PROCDLY		(1 << 4)
 #define SHA_MR_ALGO_SHA1	(0 << 8)
 #define SHA_MR_ALGO_SHA256	(1 << 8)
+#define SHA_MR_ALGO_SHA384	(2 << 8)
+#define SHA_MR_ALGO_SHA512	(3 << 8)
+#define SHA_MR_ALGO_SHA224	(4 << 8)
+#define SHA_MR_DUALBUFF		(1 << 16)
 
 #define SHA_IER			0x10
 #define SHA_IDR			0x14
@@ -33,6 +36,8 @@
 #define SHA_ISR_URAT_MR		(0x2 << 12)
 #define SHA_ISR_URAT_WO		(0x5 << 12)
 
+#define	SHA_HW_VERSION		0xFC
+
 #define SHA_TPR			0x108
 #define SHA_TCR			0x10C
 #define SHA_TNPR		0x118
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4918e9424d31..eaed8bf183bc 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -38,6 +38,7 @@
 #include <crypto/sha.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
 #include "atmel-sha-regs.h"
 
 /* SHA flags */
@@ -52,11 +53,12 @@
 #define SHA_FLAGS_FINUP		BIT(16)
 #define SHA_FLAGS_SG		BIT(17)
 #define SHA_FLAGS_SHA1		BIT(18)
-#define SHA_FLAGS_SHA256	BIT(19)
-#define SHA_FLAGS_ERROR		BIT(20)
-#define SHA_FLAGS_PAD		BIT(21)
-
-#define SHA_FLAGS_DUALBUFF	BIT(24)
+#define SHA_FLAGS_SHA224	BIT(19)
+#define SHA_FLAGS_SHA256	BIT(20)
+#define SHA_FLAGS_SHA384	BIT(21)
+#define SHA_FLAGS_SHA512	BIT(22)
+#define SHA_FLAGS_ERROR		BIT(23)
+#define SHA_FLAGS_PAD		BIT(24)
 
 #define SHA_OP_UPDATE	1
 #define SHA_OP_FINAL	2
@@ -65,6 +67,12 @@
 
 #define ATMEL_SHA_DMA_THRESHOLD		56
 
+struct atmel_sha_caps {
+	bool	has_dma;
+	bool	has_dualbuff;
+	bool	has_sha224;
+	bool	has_sha_384_512;
+};
 
 struct atmel_sha_dev;
 
@@ -73,8 +81,8 @@ struct atmel_sha_reqctx {
 	unsigned long	flags;
 	unsigned long	op;
 
-	u8	digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
-	size_t	digcnt;
+	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
+	u64	digcnt[2];
 	size_t	bufcnt;
 	size_t	buflen;
 	dma_addr_t	dma_addr;
@@ -84,6 +92,8 @@ struct atmel_sha_reqctx {
 	unsigned int	offset;	/* offset in current sg */
 	unsigned int	total;	/* total request */
 
+	size_t block_size;
+
 	u8	buffer[0] __aligned(sizeof(u32));
 };
 
@@ -97,7 +107,12 @@ struct atmel_sha_ctx {
 
 };
 
-#define ATMEL_SHA_QUEUE_LENGTH	1
+#define ATMEL_SHA_QUEUE_LENGTH	50
+
+struct atmel_sha_dma {
+	struct dma_chan			*chan;
+	struct dma_slave_config dma_conf;
+};
 
 struct atmel_sha_dev {
 	struct list_head	list;
@@ -114,6 +129,12 @@ struct atmel_sha_dev {
 
 	unsigned long		flags;
 	struct crypto_queue	queue;
 	struct ahash_request	*req;
+
+	struct atmel_sha_dma	dma_lch_in;
+
+	struct atmel_sha_caps	caps;
+
+	u32	hw_version;
 };
 
@@ -137,14 +158,6 @@ static inline void atmel_sha_write(struct atmel_sha_dev *dd,
 	writel_relaxed(value, dd->io_base + offset);
 }
 
-static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
-{
-	atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);
-
-	if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
-		dd->flags |= SHA_FLAGS_DUALBUFF;
-}
-
 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
 {
 	size_t count;
@@ -176,31 +189,58 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
 }
 
 /*
- * The purpose of this padding is to ensure that the padded message
- * is a multiple of 512 bits. The bit "1" is appended at the end of
- * the message followed by "padlen-1" zero bits. Then a 64 bits block
- * equals to the message length in bits is appended.
+ * The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message followed by
+ * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
+ * 128 bits block (SHA384/SHA512) equals to the message length in bits
+ * is appended.
  *
- * padlen is calculated as followed:
+ * For SHA1/SHA224/SHA256, padlen is calculated as followed:
  *  - if message length < 56 bytes then padlen = 56 - message length
  *  - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as followed:
+ *  - if message length < 112 bytes then padlen = 112 - message length
+ *  - else padlen = 128 + 112 - message length
  */
 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
 {
 	unsigned int index, padlen;
-	u64 bits;
-	u64 size;
+	u64 bits[2];
+	u64 size[2];
 
-	bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
-	size = cpu_to_be64(bits);
+	size[0] = ctx->digcnt[0];
+	size[1] = ctx->digcnt[1];
 
-	index = ctx->bufcnt & 0x3f;
-	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
-	*(ctx->buffer + ctx->bufcnt) = 0x80;
-	memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
-	memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
-	ctx->bufcnt += padlen + 8;
-	ctx->flags |= SHA_FLAGS_PAD;
+	size[0] += ctx->bufcnt;
+	if (size[0] < ctx->bufcnt)
+		size[1]++;
+
+	size[0] += length;
+	if (size[0] < length)
+		size[1]++;
+
+	bits[1] = cpu_to_be64(size[0] << 3);
+	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
+
+	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
+		index = ctx->bufcnt & 0x7f;
+		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
+		*(ctx->buffer + ctx->bufcnt) = 0x80;
+		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
+		ctx->bufcnt += padlen + 16;
+		ctx->flags |= SHA_FLAGS_PAD;
+	} else {
+		index = ctx->bufcnt & 0x3f;
+		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+		*(ctx->buffer + ctx->bufcnt) = 0x80;
+		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
+		ctx->bufcnt += padlen + 8;
+		ctx->flags |= SHA_FLAGS_PAD;
+	}
 }
 
 static int atmel_sha_init(struct ahash_request *req)
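
An aside on the padding rules documented in the hunk above: the patch generalizes the 64-byte-block/8-byte-length layout of SHA-1/224/256 to the 128-byte-block/16-byte-length layout of SHA-384/512, splitting the bit count across two big-endian 64-bit words. The standalone sketch below is illustrative only (not from this patch); it reproduces the two padlen formulas and a couple of boundary cases:

#include <stdio.h>

/* SHA-1/224/256: pad to a 64-byte boundary, reserving 8 length bytes. */
static unsigned int padlen64(unsigned int msglen)
{
	unsigned int index = msglen & 0x3f;

	return (index < 56) ? (56 - index) : ((64 + 56) - index);
}

/* SHA-384/512: pad to a 128-byte boundary, reserving 16 length bytes. */
static unsigned int padlen128(unsigned int msglen)
{
	unsigned int index = msglen & 0x7f;

	return (index < 112) ? (112 - index) : ((128 + 112) - index);
}

int main(void)
{
	/* 55 message bytes leave room for the 0x80 marker plus the
	 * 8-byte length (55 + 1 + 8 = 64), but 56 bytes force a whole
	 * extra block; likewise at the 112-byte boundary for SHA-512.
	 */
	printf("sha256: %u %u\n", padlen64(55), padlen64(56));     /* 1 64 */
	printf("sha512: %u %u\n", padlen128(111), padlen128(112)); /* 1 128 */
	return 0;
}
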
@@ -231,13 +271,35 @@ static int atmel_sha_init(struct ahash_request *req) | |||
231 | dev_dbg(dd->dev, "init: digest size: %d\n", | 271 | dev_dbg(dd->dev, "init: digest size: %d\n", |
232 | crypto_ahash_digestsize(tfm)); | 272 | crypto_ahash_digestsize(tfm)); |
233 | 273 | ||
234 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | 274 | switch (crypto_ahash_digestsize(tfm)) { |
275 | case SHA1_DIGEST_SIZE: | ||
235 | ctx->flags |= SHA_FLAGS_SHA1; | 276 | ctx->flags |= SHA_FLAGS_SHA1; |
236 | else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE) | 277 | ctx->block_size = SHA1_BLOCK_SIZE; |
278 | break; | ||
279 | case SHA224_DIGEST_SIZE: | ||
280 | ctx->flags |= SHA_FLAGS_SHA224; | ||
281 | ctx->block_size = SHA224_BLOCK_SIZE; | ||
282 | break; | ||
283 | case SHA256_DIGEST_SIZE: | ||
237 | ctx->flags |= SHA_FLAGS_SHA256; | 284 | ctx->flags |= SHA_FLAGS_SHA256; |
285 | ctx->block_size = SHA256_BLOCK_SIZE; | ||
286 | break; | ||
287 | case SHA384_DIGEST_SIZE: | ||
288 | ctx->flags |= SHA_FLAGS_SHA384; | ||
289 | ctx->block_size = SHA384_BLOCK_SIZE; | ||
290 | break; | ||
291 | case SHA512_DIGEST_SIZE: | ||
292 | ctx->flags |= SHA_FLAGS_SHA512; | ||
293 | ctx->block_size = SHA512_BLOCK_SIZE; | ||
294 | break; | ||
295 | default: | ||
296 | return -EINVAL; | ||
297 | break; | ||
298 | } | ||
238 | 299 | ||
239 | ctx->bufcnt = 0; | 300 | ctx->bufcnt = 0; |
240 | ctx->digcnt = 0; | 301 | ctx->digcnt[0] = 0; |
302 | ctx->digcnt[1] = 0; | ||
241 | ctx->buflen = SHA_BUFFER_LEN; | 303 | ctx->buflen = SHA_BUFFER_LEN; |
242 | 304 | ||
243 | return 0; | 305 | return 0; |
@@ -249,19 +311,28 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) | |||
249 | u32 valcr = 0, valmr = SHA_MR_MODE_AUTO; | 311 | u32 valcr = 0, valmr = SHA_MR_MODE_AUTO; |
250 | 312 | ||
251 | if (likely(dma)) { | 313 | if (likely(dma)) { |
252 | atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE); | 314 | if (!dd->caps.has_dma) |
315 | atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE); | ||
253 | valmr = SHA_MR_MODE_PDC; | 316 | valmr = SHA_MR_MODE_PDC; |
254 | if (dd->flags & SHA_FLAGS_DUALBUFF) | 317 | if (dd->caps.has_dualbuff) |
255 | valmr = SHA_MR_DUALBUFF; | 318 | valmr |= SHA_MR_DUALBUFF; |
256 | } else { | 319 | } else { |
257 | atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); | 320 | atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); |
258 | } | 321 | } |
259 | 322 | ||
260 | if (ctx->flags & SHA_FLAGS_SHA256) | 323 | if (ctx->flags & SHA_FLAGS_SHA1) |
324 | valmr |= SHA_MR_ALGO_SHA1; | ||
325 | else if (ctx->flags & SHA_FLAGS_SHA224) | ||
326 | valmr |= SHA_MR_ALGO_SHA224; | ||
327 | else if (ctx->flags & SHA_FLAGS_SHA256) | ||
261 | valmr |= SHA_MR_ALGO_SHA256; | 328 | valmr |= SHA_MR_ALGO_SHA256; |
329 | else if (ctx->flags & SHA_FLAGS_SHA384) | ||
330 | valmr |= SHA_MR_ALGO_SHA384; | ||
331 | else if (ctx->flags & SHA_FLAGS_SHA512) | ||
332 | valmr |= SHA_MR_ALGO_SHA512; | ||
262 | 333 | ||
263 | /* Setting CR_FIRST only for the first iteration */ | 334 | /* Setting CR_FIRST only for the first iteration */ |
264 | if (!ctx->digcnt) | 335 | if (!(ctx->digcnt[0] || ctx->digcnt[1])) |
265 | valcr = SHA_CR_FIRST; | 336 | valcr = SHA_CR_FIRST; |
266 | 337 | ||
267 | atmel_sha_write(dd, SHA_CR, valcr); | 338 | atmel_sha_write(dd, SHA_CR, valcr); |
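The algorithm field of the mode register is now picked from the request flags by the if/else chain above. The same mapping could be table-driven; a sketch using the SHA_FLAGS_*/SHA_MR_ALGO_* pairs already present in this hunk:

static const struct {
	unsigned long flag;
	u32 mr_algo;
} sha_algo_map[] = {
	{ SHA_FLAGS_SHA1,   SHA_MR_ALGO_SHA1 },
	{ SHA_FLAGS_SHA224, SHA_MR_ALGO_SHA224 },
	{ SHA_FLAGS_SHA256, SHA_MR_ALGO_SHA256 },
	{ SHA_FLAGS_SHA384, SHA_MR_ALGO_SHA384 },
	{ SHA_FLAGS_SHA512, SHA_MR_ALGO_SHA512 },
};

/* in atmel_sha_write_ctrl(), instead of the chain: */
for (i = 0; i < ARRAY_SIZE(sha_algo_map); i++)
	if (ctx->flags & sha_algo_map[i].flag) {
		valmr |= sha_algo_map[i].mr_algo;
		break;
	}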
@@ -275,13 +346,15 @@ static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf, | |||
275 | int count, len32; | 346 | int count, len32; |
276 | const u32 *buffer = (const u32 *)buf; | 347 | const u32 *buffer = (const u32 *)buf; |
277 | 348 | ||
278 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", | 349 | dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", |
279 | ctx->digcnt, length, final); | 350 | ctx->digcnt[1], ctx->digcnt[0], length, final); |
280 | 351 | ||
281 | atmel_sha_write_ctrl(dd, 0); | 352 | atmel_sha_write_ctrl(dd, 0); |
282 | 353 | ||
283 | /* should be non-zero before next lines to disable clocks later */ | 354 | /* should be non-zero before next lines to disable clocks later */ |
284 | ctx->digcnt += length; | 355 | ctx->digcnt[0] += length; |
356 | if (ctx->digcnt[0] < length) | ||
357 | ctx->digcnt[1]++; | ||
285 | 358 | ||
286 | if (final) | 359 | if (final) |
287 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | 360 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ |
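The digest counter is now a pair of 64-bit words updated with an explicit carry; the pattern recurs in xmit_cpu(), xmit_pdc() and xmit_dma() below and could be folded into one helper (sketch):

/* Add 'length' bytes to a 128-bit counter held as digcnt[1]:digcnt[0].
 * An unsigned wrap of the low word signals the carry. */
static inline void atmel_sha_digcnt_add(u64 digcnt[2], size_t length)
{
	digcnt[0] += length;
	if (digcnt[0] < length)
		digcnt[1]++;
}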
@@ -302,8 +375,8 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | |||
302 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | 375 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); |
303 | int len32; | 376 | int len32; |
304 | 377 | ||
305 | dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n", | 378 | dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", |
306 | ctx->digcnt, length1, final); | 379 | ctx->digcnt[1], ctx->digcnt[0], length1, final); |
307 | 380 | ||
308 | len32 = DIV_ROUND_UP(length1, sizeof(u32)); | 381 | len32 = DIV_ROUND_UP(length1, sizeof(u32)); |
309 | atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS); | 382 | atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS); |
@@ -317,7 +390,9 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | |||
317 | atmel_sha_write_ctrl(dd, 1); | 390 | atmel_sha_write_ctrl(dd, 1); |
318 | 391 | ||
319 | /* should be non-zero before next lines to disable clocks later */ | 392 | /* should be non-zero before next lines to disable clocks later */ |
320 | ctx->digcnt += length1; | 393 | ctx->digcnt[0] += length1; |
394 | if (ctx->digcnt[0] < length1) | ||
395 | ctx->digcnt[1]++; | ||
321 | 396 | ||
322 | if (final) | 397 | if (final) |
323 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | 398 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ |
@@ -330,6 +405,86 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | |||
330 | return -EINPROGRESS; | 405 | return -EINPROGRESS; |
331 | } | 406 | } |
332 | 407 | ||
408 | static void atmel_sha_dma_callback(void *data) | ||
409 | { | ||
410 | struct atmel_sha_dev *dd = data; | ||
411 | |||
412 | /* dma_lch_in - completed - wait DATRDY */ | ||
413 | atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); | ||
414 | } | ||
415 | |||
416 | static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | ||
417 | size_t length1, dma_addr_t dma_addr2, size_t length2, int final) | ||
418 | { | ||
419 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
420 | struct dma_async_tx_descriptor *in_desc; | ||
421 | struct scatterlist sg[2]; | ||
422 | |||
423 | dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", | ||
424 | ctx->digcnt[1], ctx->digcnt[0], length1, final); | ||
425 | |||
426 | if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 | | ||
427 | SHA_FLAGS_SHA256)) { | ||
428 | dd->dma_lch_in.dma_conf.src_maxburst = 16; | ||
429 | dd->dma_lch_in.dma_conf.dst_maxburst = 16; | ||
430 | } else { | ||
431 | dd->dma_lch_in.dma_conf.src_maxburst = 32; | ||
432 | dd->dma_lch_in.dma_conf.dst_maxburst = 32; | ||
433 | } | ||
434 | |||
435 | dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); | ||
436 | |||
437 | if (length2) { | ||
438 | sg_init_table(sg, 2); | ||
439 | sg_dma_address(&sg[0]) = dma_addr1; | ||
440 | sg_dma_len(&sg[0]) = length1; | ||
441 | sg_dma_address(&sg[1]) = dma_addr2; | ||
442 | sg_dma_len(&sg[1]) = length2; | ||
443 | in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, | ||
444 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
445 | } else { | ||
446 | sg_init_table(sg, 1); | ||
447 | sg_dma_address(&sg[0]) = dma_addr1; | ||
448 | sg_dma_len(&sg[0]) = length1; | ||
449 | in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, | ||
450 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
451 | } | ||
452 | if (!in_desc) | ||
453 | return -EINVAL; | ||
454 | |||
455 | in_desc->callback = atmel_sha_dma_callback; | ||
456 | in_desc->callback_param = dd; | ||
457 | |||
458 | atmel_sha_write_ctrl(dd, 1); | ||
459 | |||
460 | /* should be non-zero before next lines to disable clocks later */ | ||
461 | ctx->digcnt[0] += length1; | ||
462 | if (ctx->digcnt[0] < length1) | ||
463 | ctx->digcnt[1]++; | ||
464 | |||
465 | if (final) | ||
466 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | ||
467 | |||
468 | dd->flags |= SHA_FLAGS_DMA_ACTIVE; | ||
469 | |||
470 | /* Start DMA transfer */ | ||
471 | dmaengine_submit(in_desc); | ||
472 | dma_async_issue_pending(dd->dma_lch_in.chan); | ||
473 | |||
474 | return -EINPROGRESS; | ||
475 | } | ||
476 | |||
477 | static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | ||
478 | size_t length1, dma_addr_t dma_addr2, size_t length2, int final) | ||
479 | { | ||
480 | if (dd->caps.has_dma) | ||
481 | return atmel_sha_xmit_dma(dd, dma_addr1, length1, | ||
482 | dma_addr2, length2, final); | ||
483 | else | ||
484 | return atmel_sha_xmit_pdc(dd, dma_addr1, length1, | ||
485 | dma_addr2, length2, final); | ||
486 | } | ||
487 | |||
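atmel_sha_xmit_dma() above follows the standard dmaengine slave sequence: configure the channel, prepare a mem-to-device descriptor, attach a completion callback, submit, then kick the engine. Condensed into a hypothetical helper (sketch, error paths trimmed):

static int sha_dma_submit(struct dma_chan *chan, struct scatterlist *sg,
			  unsigned int nents, dma_async_tx_callback done_cb,
			  void *param)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;
	desc->callback = done_cb;	/* here: re-enable the DATRDY irq */
	desc->callback_param = param;
	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the transfer */
	return -EINPROGRESS;
}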
333 | static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) | 488 | static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) |
334 | { | 489 | { |
335 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | 490 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); |
@@ -337,7 +492,6 @@ static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) | |||
337 | 492 | ||
338 | atmel_sha_append_sg(ctx); | 493 | atmel_sha_append_sg(ctx); |
339 | atmel_sha_fill_padding(ctx, 0); | 494 | atmel_sha_fill_padding(ctx, 0); |
340 | |||
341 | bufcnt = ctx->bufcnt; | 495 | bufcnt = ctx->bufcnt; |
342 | ctx->bufcnt = 0; | 496 | ctx->bufcnt = 0; |
343 | 497 | ||
@@ -349,17 +503,17 @@ static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd, | |||
349 | size_t length, int final) | 503 | size_t length, int final) |
350 | { | 504 | { |
351 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, | 505 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, |
352 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | 506 | ctx->buflen + ctx->block_size, DMA_TO_DEVICE); |
353 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | 507 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { |
354 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen + | 508 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen + |
355 | SHA1_BLOCK_SIZE); | 509 | ctx->block_size); |
356 | return -EINVAL; | 510 | return -EINVAL; |
357 | } | 511 | } |
358 | 512 | ||
359 | ctx->flags &= ~SHA_FLAGS_SG; | 513 | ctx->flags &= ~SHA_FLAGS_SG; |
360 | 514 | ||
361 | /* next call does not fail... so no unmap in the case of error */ | 515 | /* next call does not fail... so no unmap in the case of error */ |
362 | return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final); | 516 | return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); |
363 | } | 517 | } |
364 | 518 | ||
365 | static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) | 519 | static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) |
@@ -372,8 +526,8 @@ static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) | |||
372 | 526 | ||
373 | final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; | 527 | final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; |
374 | 528 | ||
375 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", | 529 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n", |
376 | ctx->bufcnt, ctx->digcnt, final); | 530 | ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); |
377 | 531 | ||
378 | if (final) | 532 | if (final) |
379 | atmel_sha_fill_padding(ctx, 0); | 533 | atmel_sha_fill_padding(ctx, 0); |
@@ -400,30 +554,25 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | |||
400 | if (ctx->bufcnt || ctx->offset) | 554 | if (ctx->bufcnt || ctx->offset) |
401 | return atmel_sha_update_dma_slow(dd); | 555 | return atmel_sha_update_dma_slow(dd); |
402 | 556 | ||
403 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", | 557 | dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n", |
404 | ctx->digcnt, ctx->bufcnt, ctx->total); | 558 | ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); |
405 | 559 | ||
406 | sg = ctx->sg; | 560 | sg = ctx->sg; |
407 | 561 | ||
408 | if (!IS_ALIGNED(sg->offset, sizeof(u32))) | 562 | if (!IS_ALIGNED(sg->offset, sizeof(u32))) |
409 | return atmel_sha_update_dma_slow(dd); | 563 | return atmel_sha_update_dma_slow(dd); |
410 | 564 | ||
411 | if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE)) | 565 | if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) |
412 | /* size is not SHA1_BLOCK_SIZE aligned */ | 566 | /* size is not ctx->block_size aligned */ |
413 | return atmel_sha_update_dma_slow(dd); | 567 | return atmel_sha_update_dma_slow(dd); |
414 | 568 | ||
415 | length = min(ctx->total, sg->length); | 569 | length = min(ctx->total, sg->length); |
416 | 570 | ||
417 | if (sg_is_last(sg)) { | 571 | if (sg_is_last(sg)) { |
418 | if (!(ctx->flags & SHA_FLAGS_FINUP)) { | 572 | if (!(ctx->flags & SHA_FLAGS_FINUP)) { |
419 | /* not last sg must be SHA1_BLOCK_SIZE aligned */ | 573 | /* not last sg must be ctx->block_size aligned */ |
420 | tail = length & (SHA1_BLOCK_SIZE - 1); | 574 | tail = length & (ctx->block_size - 1); |
421 | length -= tail; | 575 | length -= tail; |
422 | if (length == 0) { | ||
423 | /* offset where to start slow */ | ||
424 | ctx->offset = length; | ||
425 | return atmel_sha_update_dma_slow(dd); | ||
426 | } | ||
427 | } | 576 | } |
428 | } | 577 | } |
429 | 578 | ||
@@ -434,7 +583,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | |||
434 | 583 | ||
435 | /* Add padding */ | 584 | /* Add padding */ |
436 | if (final) { | 585 | if (final) { |
437 | tail = length & (SHA1_BLOCK_SIZE - 1); | 586 | tail = length & (ctx->block_size - 1); |
438 | length -= tail; | 587 | length -= tail; |
439 | ctx->total += tail; | 588 | ctx->total += tail; |
440 | ctx->offset = length; /* offset where to start slow */ | 589 | ctx->offset = length; /* offset where to start slow */ |
@@ -445,10 +594,10 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | |||
445 | atmel_sha_fill_padding(ctx, length); | 594 | atmel_sha_fill_padding(ctx, length); |
446 | 595 | ||
447 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, | 596 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, |
448 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | 597 | ctx->buflen + ctx->block_size, DMA_TO_DEVICE); |
449 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | 598 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { |
450 | dev_err(dd->dev, "dma %u bytes error\n", | 599 | dev_err(dd->dev, "dma %u bytes error\n", |
451 | ctx->buflen + SHA1_BLOCK_SIZE); | 600 | ctx->buflen + ctx->block_size); |
452 | return -EINVAL; | 601 | return -EINVAL; |
453 | } | 602 | } |
454 | 603 | ||
@@ -456,7 +605,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | |||
456 | ctx->flags &= ~SHA_FLAGS_SG; | 605 | ctx->flags &= ~SHA_FLAGS_SG; |
457 | count = ctx->bufcnt; | 606 | count = ctx->bufcnt; |
458 | ctx->bufcnt = 0; | 607 | ctx->bufcnt = 0; |
459 | return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0, | 608 | return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, |
460 | 0, final); | 609 | 0, final); |
461 | } else { | 610 | } else { |
462 | ctx->sg = sg; | 611 | ctx->sg = sg; |
@@ -470,7 +619,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | |||
470 | 619 | ||
471 | count = ctx->bufcnt; | 620 | count = ctx->bufcnt; |
472 | ctx->bufcnt = 0; | 621 | ctx->bufcnt = 0; |
473 | return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), | 622 | return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), |
474 | length, ctx->dma_addr, count, final); | 623 | length, ctx->dma_addr, count, final); |
475 | } | 624 | } |
476 | } | 625 | } |
@@ -483,7 +632,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | |||
483 | ctx->flags |= SHA_FLAGS_SG; | 632 | ctx->flags |= SHA_FLAGS_SG; |
484 | 633 | ||
485 | /* next call does not fail... so no unmap in the case of error */ | 634 | /* next call does not fail... so no unmap in the case of error */ |
486 | return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0, | 635 | return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, |
487 | 0, final); | 636 | 0, final); |
488 | } | 637 | } |
489 | 638 | ||
@@ -498,12 +647,13 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) | |||
498 | if (ctx->sg) | 647 | if (ctx->sg) |
499 | ctx->offset = 0; | 648 | ctx->offset = 0; |
500 | } | 649 | } |
501 | if (ctx->flags & SHA_FLAGS_PAD) | 650 | if (ctx->flags & SHA_FLAGS_PAD) { |
502 | dma_unmap_single(dd->dev, ctx->dma_addr, | 651 | dma_unmap_single(dd->dev, ctx->dma_addr, |
503 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | 652 | ctx->buflen + ctx->block_size, DMA_TO_DEVICE); |
653 | } | ||
504 | } else { | 654 | } else { |
505 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + | 655 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + |
506 | SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | 656 | ctx->block_size, DMA_TO_DEVICE); |
507 | } | 657 | } |
508 | 658 | ||
509 | return 0; | 659 | return 0; |
@@ -515,8 +665,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd) | |||
515 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | 665 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); |
516 | int err; | 666 | int err; |
517 | 667 | ||
518 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", | 668 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n", |
519 | ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0); | 669 | ctx->total, ctx->digcnt[1], ctx->digcnt[0]); |
520 | 670 | ||
521 | if (ctx->flags & SHA_FLAGS_CPU) | 671 | if (ctx->flags & SHA_FLAGS_CPU) |
522 | err = atmel_sha_update_cpu(dd); | 672 | err = atmel_sha_update_cpu(dd); |
@@ -524,8 +674,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd) | |||
524 | err = atmel_sha_update_dma_start(dd); | 674 | err = atmel_sha_update_dma_start(dd); |
525 | 675 | ||
526 | /* wait for dma completion before can take more data */ | 676 | /* wait for dma completion before can take more data */ |
527 | dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", | 677 | dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n", |
528 | err, ctx->digcnt); | 678 | err, ctx->digcnt[1], ctx->digcnt[0]); |
529 | 679 | ||
530 | return err; | 680 | return err; |
531 | } | 681 | } |
@@ -562,12 +712,21 @@ static void atmel_sha_copy_hash(struct ahash_request *req) | |||
562 | u32 *hash = (u32 *)ctx->digest; | 712 | u32 *hash = (u32 *)ctx->digest; |
563 | int i; | 713 | int i; |
564 | 714 | ||
565 | if (likely(ctx->flags & SHA_FLAGS_SHA1)) | 715 | if (ctx->flags & SHA_FLAGS_SHA1) |
566 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | 716 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) |
567 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | 717 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); |
568 | else | 718 | else if (ctx->flags & SHA_FLAGS_SHA224) |
719 | for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++) | ||
720 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | ||
721 | else if (ctx->flags & SHA_FLAGS_SHA256) | ||
569 | for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) | 722 | for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) |
570 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | 723 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); |
724 | else if (ctx->flags & SHA_FLAGS_SHA384) | ||
725 | for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++) | ||
726 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | ||
727 | else | ||
728 | for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++) | ||
729 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | ||
571 | } | 730 | } |
572 | 731 | ||
573 | static void atmel_sha_copy_ready_hash(struct ahash_request *req) | 732 | static void atmel_sha_copy_ready_hash(struct ahash_request *req) |
@@ -577,10 +736,16 @@ static void atmel_sha_copy_ready_hash(struct ahash_request *req) | |||
577 | if (!req->result) | 736 | if (!req->result) |
578 | return; | 737 | return; |
579 | 738 | ||
580 | if (likely(ctx->flags & SHA_FLAGS_SHA1)) | 739 | if (ctx->flags & SHA_FLAGS_SHA1) |
581 | memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); | 740 | memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); |
582 | else | 741 | else if (ctx->flags & SHA_FLAGS_SHA224) |
742 | memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); | ||
743 | else if (ctx->flags & SHA_FLAGS_SHA256) | ||
583 | memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); | 744 | memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); |
745 | else if (ctx->flags & SHA_FLAGS_SHA384) | ||
746 | memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); | ||
747 | else | ||
748 | memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); | ||
584 | } | 749 | } |
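Both copy helpers above switch on the same flag set only to vary the word or byte count; either chain could be folded by deriving the count from the digest size (sketch, reusing atmel_sha_read()/SHA_REG_DIGEST() from this file):

static void atmel_sha_copy_hash_words(struct atmel_sha_dev *dd, u32 *hash,
				      unsigned int digest_size)
{
	unsigned int i, words = digest_size / sizeof(u32);

	for (i = 0; i < words; i++)
		hash[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
}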
585 | 750 | ||
586 | static int atmel_sha_finish(struct ahash_request *req) | 751 | static int atmel_sha_finish(struct ahash_request *req) |
@@ -589,11 +754,11 @@ static int atmel_sha_finish(struct ahash_request *req) | |||
589 | struct atmel_sha_dev *dd = ctx->dd; | 754 | struct atmel_sha_dev *dd = ctx->dd; |
590 | int err = 0; | 755 | int err = 0; |
591 | 756 | ||
592 | if (ctx->digcnt) | 757 | if (ctx->digcnt[0] || ctx->digcnt[1]) |
593 | atmel_sha_copy_ready_hash(req); | 758 | atmel_sha_copy_ready_hash(req); |
594 | 759 | ||
595 | dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, | 760 | dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], |
596 | ctx->bufcnt); | 761 | ctx->digcnt[0], ctx->bufcnt); |
597 | 762 | ||
598 | return err; | 763 | return err; |
599 | } | 764 | } |
@@ -628,9 +793,8 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd) | |||
628 | { | 793 | { |
629 | clk_prepare_enable(dd->iclk); | 794 | clk_prepare_enable(dd->iclk); |
630 | 795 | ||
631 | if (SHA_FLAGS_INIT & dd->flags) { | 796 | if (!(SHA_FLAGS_INIT & dd->flags)) { |
632 | atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); | 797 | atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); |
633 | atmel_sha_dualbuff_test(dd); | ||
634 | dd->flags |= SHA_FLAGS_INIT; | 798 | dd->flags |= SHA_FLAGS_INIT; |
635 | dd->err = 0; | 799 | dd->err = 0; |
636 | } | 800 | } |
@@ -638,6 +802,23 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd) | |||
638 | return 0; | 802 | return 0; |
639 | } | 803 | } |
640 | 804 | ||
805 | static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd) | ||
806 | { | ||
807 | return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff; | ||
808 | } | ||
809 | |||
810 | static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd) | ||
811 | { | ||
812 | atmel_sha_hw_init(dd); | ||
813 | |||
814 | dd->hw_version = atmel_sha_get_version(dd); | ||
815 | |||
816 | dev_info(dd->dev, | ||
817 | "version: 0x%x\n", dd->hw_version); | ||
818 | |||
819 | clk_disable_unprepare(dd->iclk); | ||
820 | } | ||
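The version helpers keep only the low 12 bits of the SHA_HW_VERSION register; atmel_sha_get_cap() below then keys off the major revision. The decoding, spelled out (sketch):

static u32 atmel_sha_major_version(struct atmel_sha_dev *dd)
{
	u32 vers = atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff; /* e.g. 0x410 */

	return vers & 0xff0;	/* 0x410 / 0x400 / 0x320 in get_cap() */
}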
821 | |||
641 | static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, | 822 | static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, |
642 | struct ahash_request *req) | 823 | struct ahash_request *req) |
643 | { | 824 | { |
@@ -682,10 +863,9 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, | |||
682 | 863 | ||
683 | if (ctx->op == SHA_OP_UPDATE) { | 864 | if (ctx->op == SHA_OP_UPDATE) { |
684 | err = atmel_sha_update_req(dd); | 865 | err = atmel_sha_update_req(dd); |
685 | if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) { | 866 | if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) |
686 | /* no final() after finup() */ | 867 | /* no final() after finup() */ |
687 | err = atmel_sha_final_req(dd); | 868 | err = atmel_sha_final_req(dd); |
688 | } | ||
689 | } else if (ctx->op == SHA_OP_FINAL) { | 869 | } else if (ctx->op == SHA_OP_FINAL) { |
690 | err = atmel_sha_final_req(dd); | 870 | err = atmel_sha_final_req(dd); |
691 | } | 871 | } |
@@ -808,7 +988,7 @@ static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | |||
808 | } | 988 | } |
809 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 989 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
810 | sizeof(struct atmel_sha_reqctx) + | 990 | sizeof(struct atmel_sha_reqctx) + |
811 | SHA_BUFFER_LEN + SHA256_BLOCK_SIZE); | 991 | SHA_BUFFER_LEN + SHA512_BLOCK_SIZE); |
812 | 992 | ||
813 | return 0; | 993 | return 0; |
814 | } | 994 | } |
@@ -826,7 +1006,7 @@ static void atmel_sha_cra_exit(struct crypto_tfm *tfm) | |||
826 | tctx->fallback = NULL; | 1006 | tctx->fallback = NULL; |
827 | } | 1007 | } |
828 | 1008 | ||
829 | static struct ahash_alg sha_algs[] = { | 1009 | static struct ahash_alg sha_1_256_algs[] = { |
830 | { | 1010 | { |
831 | .init = atmel_sha_init, | 1011 | .init = atmel_sha_init, |
832 | .update = atmel_sha_update, | 1012 | .update = atmel_sha_update, |
@@ -875,6 +1055,79 @@ static struct ahash_alg sha_algs[] = { | |||
875 | }, | 1055 | }, |
876 | }; | 1056 | }; |
877 | 1057 | ||
1058 | static struct ahash_alg sha_224_alg = { | ||
1059 | .init = atmel_sha_init, | ||
1060 | .update = atmel_sha_update, | ||
1061 | .final = atmel_sha_final, | ||
1062 | .finup = atmel_sha_finup, | ||
1063 | .digest = atmel_sha_digest, | ||
1064 | .halg = { | ||
1065 | .digestsize = SHA224_DIGEST_SIZE, | ||
1066 | .base = { | ||
1067 | .cra_name = "sha224", | ||
1068 | .cra_driver_name = "atmel-sha224", | ||
1069 | .cra_priority = 100, | ||
1070 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1071 | CRYPTO_ALG_NEED_FALLBACK, | ||
1072 | .cra_blocksize = SHA224_BLOCK_SIZE, | ||
1073 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | ||
1074 | .cra_alignmask = 0, | ||
1075 | .cra_module = THIS_MODULE, | ||
1076 | .cra_init = atmel_sha_cra_init, | ||
1077 | .cra_exit = atmel_sha_cra_exit, | ||
1078 | } | ||
1079 | } | ||
1080 | }; | ||
1081 | |||
1082 | static struct ahash_alg sha_384_512_algs[] = { | ||
1083 | { | ||
1084 | .init = atmel_sha_init, | ||
1085 | .update = atmel_sha_update, | ||
1086 | .final = atmel_sha_final, | ||
1087 | .finup = atmel_sha_finup, | ||
1088 | .digest = atmel_sha_digest, | ||
1089 | .halg = { | ||
1090 | .digestsize = SHA384_DIGEST_SIZE, | ||
1091 | .base = { | ||
1092 | .cra_name = "sha384", | ||
1093 | .cra_driver_name = "atmel-sha384", | ||
1094 | .cra_priority = 100, | ||
1095 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1096 | CRYPTO_ALG_NEED_FALLBACK, | ||
1097 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
1098 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | ||
1099 | .cra_alignmask = 0x3, | ||
1100 | .cra_module = THIS_MODULE, | ||
1101 | .cra_init = atmel_sha_cra_init, | ||
1102 | .cra_exit = atmel_sha_cra_exit, | ||
1103 | } | ||
1104 | } | ||
1105 | }, | ||
1106 | { | ||
1107 | .init = atmel_sha_init, | ||
1108 | .update = atmel_sha_update, | ||
1109 | .final = atmel_sha_final, | ||
1110 | .finup = atmel_sha_finup, | ||
1111 | .digest = atmel_sha_digest, | ||
1112 | .halg = { | ||
1113 | .digestsize = SHA512_DIGEST_SIZE, | ||
1114 | .base = { | ||
1115 | .cra_name = "sha512", | ||
1116 | .cra_driver_name = "atmel-sha512", | ||
1117 | .cra_priority = 100, | ||
1118 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1119 | CRYPTO_ALG_NEED_FALLBACK, | ||
1120 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
1121 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | ||
1122 | .cra_alignmask = 0x3, | ||
1123 | .cra_module = THIS_MODULE, | ||
1124 | .cra_init = atmel_sha_cra_init, | ||
1125 | .cra_exit = atmel_sha_cra_exit, | ||
1126 | } | ||
1127 | } | ||
1128 | }, | ||
1129 | }; | ||
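Once registered, the new transforms are reachable through the regular ahash API by cra_name; a minimal consumer (sketch, request setup and error handling trimmed):

struct crypto_ahash *tfm;

tfm = crypto_alloc_ahash("sha384", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);
/* allocate an ahash_request, then init/update/final as usual */
crypto_free_ahash(tfm);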
1130 | |||
878 | static void atmel_sha_done_task(unsigned long data) | 1131 | static void atmel_sha_done_task(unsigned long data) |
879 | { | 1132 | { |
880 | struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; | 1133 | struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; |
@@ -941,32 +1194,142 @@ static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) | |||
941 | { | 1194 | { |
942 | int i; | 1195 | int i; |
943 | 1196 | ||
944 | for (i = 0; i < ARRAY_SIZE(sha_algs); i++) | 1197 | for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) |
945 | crypto_unregister_ahash(&sha_algs[i]); | 1198 | crypto_unregister_ahash(&sha_1_256_algs[i]); |
1199 | |||
1200 | if (dd->caps.has_sha224) | ||
1201 | crypto_unregister_ahash(&sha_224_alg); | ||
1202 | |||
1203 | if (dd->caps.has_sha_384_512) { | ||
1204 | for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) | ||
1205 | crypto_unregister_ahash(&sha_384_512_algs[i]); | ||
1206 | } | ||
946 | } | 1207 | } |
947 | 1208 | ||
948 | static int atmel_sha_register_algs(struct atmel_sha_dev *dd) | 1209 | static int atmel_sha_register_algs(struct atmel_sha_dev *dd) |
949 | { | 1210 | { |
950 | int err, i, j; | 1211 | int err, i, j; |
951 | 1212 | ||
952 | for (i = 0; i < ARRAY_SIZE(sha_algs); i++) { | 1213 | for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) { |
953 | err = crypto_register_ahash(&sha_algs[i]); | 1214 | err = crypto_register_ahash(&sha_1_256_algs[i]); |
954 | if (err) | 1215 | if (err) |
955 | goto err_sha_algs; | 1216 | goto err_sha_1_256_algs; |
1217 | } | ||
1218 | |||
1219 | if (dd->caps.has_sha224) { | ||
1220 | err = crypto_register_ahash(&sha_224_alg); | ||
1221 | if (err) | ||
1222 | goto err_sha_224_algs; | ||
1223 | } | ||
1224 | |||
1225 | if (dd->caps.has_sha_384_512) { | ||
1226 | for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) { | ||
1227 | err = crypto_register_ahash(&sha_384_512_algs[i]); | ||
1228 | if (err) | ||
1229 | goto err_sha_384_512_algs; | ||
1230 | } | ||
956 | } | 1231 | } |
957 | 1232 | ||
958 | return 0; | 1233 | return 0; |
959 | 1234 | ||
960 | err_sha_algs: | 1235 | err_sha_384_512_algs: |
1236 | for (j = 0; j < i; j++) | ||
1237 | crypto_unregister_ahash(&sha_384_512_algs[j]); | ||
1238 | crypto_unregister_ahash(&sha_224_alg); | ||
1239 | err_sha_224_algs: | ||
1240 | i = ARRAY_SIZE(sha_1_256_algs); | ||
1241 | err_sha_1_256_algs: | ||
961 | for (j = 0; j < i; j++) | 1242 | for (j = 0; j < i; j++) |
962 | crypto_unregister_ahash(&sha_algs[j]); | 1243 | crypto_unregister_ahash(&sha_1_256_algs[j]); |
963 | 1244 | ||
964 | return err; | 1245 | return err; |
965 | } | 1246 | } |
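The error labels above unwind in reverse registration order; setting i = ARRAY_SIZE(sha_1_256_algs) before falling through deliberately reuses the final loop to drop the whole first batch. The general shape, with hypothetical register_batch_a/b helpers (sketch):

err = register_batch_a();
if (err)
	goto err_a;
err = register_batch_b();
if (err)
	goto err_b;
return 0;

err_b:
	unregister_batch_a();
err_a:
	return err;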
966 | 1247 | ||
1248 | static bool atmel_sha_filter(struct dma_chan *chan, void *slave) | ||
1249 | { | ||
1250 | struct at_dma_slave *sl = slave; | ||
1251 | |||
1252 | if (sl && sl->dma_dev == chan->device->dev) { | ||
1253 | chan->private = sl; | ||
1254 | return true; | ||
1255 | } else { | ||
1256 | return false; | ||
1257 | } | ||
1258 | } | ||
1259 | |||
1260 | static int atmel_sha_dma_init(struct atmel_sha_dev *dd, | ||
1261 | struct crypto_platform_data *pdata) | ||
1262 | { | ||
1263 | int err = -ENOMEM; | ||
1264 | dma_cap_mask_t mask_in; | ||
1265 | |||
1266 | if (pdata && pdata->dma_slave->rxdata.dma_dev) { | ||
1267 | /* Try to grab DMA channel */ | ||
1268 | dma_cap_zero(mask_in); | ||
1269 | dma_cap_set(DMA_SLAVE, mask_in); | ||
1270 | |||
1271 | dd->dma_lch_in.chan = dma_request_channel(mask_in, | ||
1272 | atmel_sha_filter, &pdata->dma_slave->rxdata); | ||
1273 | |||
1274 | if (!dd->dma_lch_in.chan) | ||
1275 | return err; | ||
1276 | |||
1277 | dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; | ||
1278 | dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + | ||
1279 | SHA_REG_DIN(0); | ||
1280 | dd->dma_lch_in.dma_conf.src_maxburst = 1; | ||
1281 | dd->dma_lch_in.dma_conf.src_addr_width = | ||
1282 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1283 | dd->dma_lch_in.dma_conf.dst_maxburst = 1; | ||
1284 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
1285 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1286 | dd->dma_lch_in.dma_conf.device_fc = false; | ||
1287 | |||
1288 | return 0; | ||
1289 | } | ||
1290 | |||
1291 | return -ENODEV; | ||
1292 | } | ||
1293 | |||
1294 | static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd) | ||
1295 | { | ||
1296 | dma_release_channel(dd->dma_lch_in.chan); | ||
1297 | } | ||
1298 | |||
1299 | static void atmel_sha_get_cap(struct atmel_sha_dev *dd) | ||
1300 | { | ||
1301 | |||
1302 | dd->caps.has_dma = 0; | ||
1303 | dd->caps.has_dualbuff = 0; | ||
1304 | dd->caps.has_sha224 = 0; | ||
1305 | dd->caps.has_sha_384_512 = 0; | ||
1306 | |||
1307 | /* keep only major version number */ | ||
1308 | switch (dd->hw_version & 0xff0) { | ||
1309 | case 0x410: | ||
1310 | dd->caps.has_dma = 1; | ||
1311 | dd->caps.has_dualbuff = 1; | ||
1312 | dd->caps.has_sha224 = 1; | ||
1313 | dd->caps.has_sha_384_512 = 1; | ||
1314 | break; | ||
1315 | case 0x400: | ||
1316 | dd->caps.has_dma = 1; | ||
1317 | dd->caps.has_dualbuff = 1; | ||
1318 | dd->caps.has_sha224 = 1; | ||
1319 | break; | ||
1320 | case 0x320: | ||
1321 | break; | ||
1322 | default: | ||
1323 | dev_warn(dd->dev, | ||
1324 | "Unmanaged sha version, set minimum capabilities\n"); | ||
1325 | break; | ||
1326 | } | ||
1327 | } | ||
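The same capability matrix could be kept as data if more IP revisions show up; a sketch, assuming the caps struct (here called atmel_sha_caps) matches the fields set above:

static const struct {
	u32 major;			/* dd->hw_version & 0xff0 */
	struct atmel_sha_caps caps;
} sha_caps_table[] = {
	{ 0x410, { .has_dma = 1, .has_dualbuff = 1,
		   .has_sha224 = 1, .has_sha_384_512 = 1 } },
	{ 0x400, { .has_dma = 1, .has_dualbuff = 1, .has_sha224 = 1 } },
	{ 0x320, { } },	/* minimum capabilities */
};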
1328 | |||
967 | static int atmel_sha_probe(struct platform_device *pdev) | 1329 | static int atmel_sha_probe(struct platform_device *pdev) |
968 | { | 1330 | { |
969 | struct atmel_sha_dev *sha_dd; | 1331 | struct atmel_sha_dev *sha_dd; |
1332 | struct crypto_platform_data *pdata; | ||
970 | struct device *dev = &pdev->dev; | 1333 | struct device *dev = &pdev->dev; |
971 | struct resource *sha_res; | 1334 | struct resource *sha_res; |
972 | unsigned long sha_phys_size; | 1335 | unsigned long sha_phys_size; |
@@ -1018,7 +1381,7 @@ static int atmel_sha_probe(struct platform_device *pdev) | |||
1018 | } | 1381 | } |
1019 | 1382 | ||
1020 | /* Initializing the clock */ | 1383 | /* Initializing the clock */ |
1021 | sha_dd->iclk = clk_get(&pdev->dev, NULL); | 1384 | sha_dd->iclk = clk_get(&pdev->dev, "sha_clk"); |
1022 | if (IS_ERR(sha_dd->iclk)) { | 1385 | if (IS_ERR(sha_dd->iclk)) { |
1023 | dev_err(dev, "clock initialization failed.\n"); | 1386 | dev_err(dev, "clock initialization failed.\n"); |
1024 | err = PTR_ERR(sha_dd->iclk); | 1387 | err = PTR_ERR(sha_dd->iclk); |
@@ -1032,6 +1395,22 @@ static int atmel_sha_probe(struct platform_device *pdev) | |||
1032 | goto sha_io_err; | 1395 | goto sha_io_err; |
1033 | } | 1396 | } |
1034 | 1397 | ||
1398 | atmel_sha_hw_version_init(sha_dd); | ||
1399 | |||
1400 | atmel_sha_get_cap(sha_dd); | ||
1401 | |||
1402 | if (sha_dd->caps.has_dma) { | ||
1403 | pdata = pdev->dev.platform_data; | ||
1404 | if (!pdata) { | ||
1405 | dev_err(&pdev->dev, "platform data not available\n"); | ||
1406 | err = -ENXIO; | ||
1407 | goto err_pdata; | ||
1408 | } | ||
1409 | err = atmel_sha_dma_init(sha_dd, pdata); | ||
1410 | if (err) | ||
1411 | goto err_sha_dma; | ||
1412 | } | ||
1413 | |||
1035 | spin_lock(&atmel_sha.lock); | 1414 | spin_lock(&atmel_sha.lock); |
1036 | list_add_tail(&sha_dd->list, &atmel_sha.dev_list); | 1415 | list_add_tail(&sha_dd->list, &atmel_sha.dev_list); |
1037 | spin_unlock(&atmel_sha.lock); | 1416 | spin_unlock(&atmel_sha.lock); |
@@ -1048,6 +1427,10 @@ err_algs: | |||
1048 | spin_lock(&atmel_sha.lock); | 1427 | spin_lock(&atmel_sha.lock); |
1049 | list_del(&sha_dd->list); | 1428 | list_del(&sha_dd->list); |
1050 | spin_unlock(&atmel_sha.lock); | 1429 | spin_unlock(&atmel_sha.lock); |
1430 | if (sha_dd->caps.has_dma) | ||
1431 | atmel_sha_dma_cleanup(sha_dd); | ||
1432 | err_sha_dma: | ||
1433 | err_pdata: | ||
1051 | iounmap(sha_dd->io_base); | 1434 | iounmap(sha_dd->io_base); |
1052 | sha_io_err: | 1435 | sha_io_err: |
1053 | clk_put(sha_dd->iclk); | 1436 | clk_put(sha_dd->iclk); |
@@ -1078,6 +1461,9 @@ static int atmel_sha_remove(struct platform_device *pdev) | |||
1078 | 1461 | ||
1079 | tasklet_kill(&sha_dd->done_task); | 1462 | tasklet_kill(&sha_dd->done_task); |
1080 | 1463 | ||
1464 | if (sha_dd->caps.has_dma) | ||
1465 | atmel_sha_dma_cleanup(sha_dd); | ||
1466 | |||
1081 | iounmap(sha_dd->io_base); | 1467 | iounmap(sha_dd->io_base); |
1082 | 1468 | ||
1083 | clk_put(sha_dd->iclk); | 1469 | clk_put(sha_dd->iclk); |
@@ -1102,6 +1488,6 @@ static struct platform_driver atmel_sha_driver = { | |||
1102 | 1488 | ||
1103 | module_platform_driver(atmel_sha_driver); | 1489 | module_platform_driver(atmel_sha_driver); |
1104 | 1490 | ||
1105 | MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support."); | 1491 | MODULE_DESCRIPTION("Atmel SHA (1/224/256/384/512) hw acceleration support."); |
1106 | MODULE_LICENSE("GPL v2"); | 1492 | MODULE_LICENSE("GPL v2"); |
1107 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); | 1493 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); |
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h index 5ac2a900d80c..f86734d0fda4 100644 --- a/drivers/crypto/atmel-tdes-regs.h +++ b/drivers/crypto/atmel-tdes-regs.h | |||
@@ -69,6 +69,8 @@ | |||
69 | #define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0) | 69 | #define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0) |
70 | #define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0 | 70 | #define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0 |
71 | 71 | ||
72 | #define TDES_HW_VERSION 0xFC | ||
73 | |||
72 | #define TDES_RPR 0x100 | 74 | #define TDES_RPR 0x100 |
73 | #define TDES_RCR 0x104 | 75 | #define TDES_RCR 0x104 |
74 | #define TDES_TPR 0x108 | 76 | #define TDES_TPR 0x108 |
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 7c73fbb17538..4a99564a08e6 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c | |||
@@ -38,29 +38,35 @@ | |||
38 | #include <crypto/des.h> | 38 | #include <crypto/des.h> |
39 | #include <crypto/hash.h> | 39 | #include <crypto/hash.h> |
40 | #include <crypto/internal/hash.h> | 40 | #include <crypto/internal/hash.h> |
41 | #include <linux/platform_data/crypto-atmel.h> | ||
41 | #include "atmel-tdes-regs.h" | 42 | #include "atmel-tdes-regs.h" |
42 | 43 | ||
43 | /* TDES flags */ | 44 | /* TDES flags */ |
44 | #define TDES_FLAGS_MODE_MASK 0x007f | 45 | #define TDES_FLAGS_MODE_MASK 0x00ff |
45 | #define TDES_FLAGS_ENCRYPT BIT(0) | 46 | #define TDES_FLAGS_ENCRYPT BIT(0) |
46 | #define TDES_FLAGS_CBC BIT(1) | 47 | #define TDES_FLAGS_CBC BIT(1) |
47 | #define TDES_FLAGS_CFB BIT(2) | 48 | #define TDES_FLAGS_CFB BIT(2) |
48 | #define TDES_FLAGS_CFB8 BIT(3) | 49 | #define TDES_FLAGS_CFB8 BIT(3) |
49 | #define TDES_FLAGS_CFB16 BIT(4) | 50 | #define TDES_FLAGS_CFB16 BIT(4) |
50 | #define TDES_FLAGS_CFB32 BIT(5) | 51 | #define TDES_FLAGS_CFB32 BIT(5) |
51 | #define TDES_FLAGS_OFB BIT(6) | 52 | #define TDES_FLAGS_CFB64 BIT(6) |
53 | #define TDES_FLAGS_OFB BIT(7) | ||
52 | 54 | ||
53 | #define TDES_FLAGS_INIT BIT(16) | 55 | #define TDES_FLAGS_INIT BIT(16) |
54 | #define TDES_FLAGS_FAST BIT(17) | 56 | #define TDES_FLAGS_FAST BIT(17) |
55 | #define TDES_FLAGS_BUSY BIT(18) | 57 | #define TDES_FLAGS_BUSY BIT(18) |
58 | #define TDES_FLAGS_DMA BIT(19) | ||
56 | 59 | ||
57 | #define ATMEL_TDES_QUEUE_LENGTH 1 | 60 | #define ATMEL_TDES_QUEUE_LENGTH 50 |
58 | 61 | ||
59 | #define CFB8_BLOCK_SIZE 1 | 62 | #define CFB8_BLOCK_SIZE 1 |
60 | #define CFB16_BLOCK_SIZE 2 | 63 | #define CFB16_BLOCK_SIZE 2 |
61 | #define CFB32_BLOCK_SIZE 4 | 64 | #define CFB32_BLOCK_SIZE 4 |
62 | #define CFB64_BLOCK_SIZE 8 | ||
63 | 65 | ||
66 | struct atmel_tdes_caps { | ||
67 | bool has_dma; | ||
68 | u32 has_cfb_3keys; | ||
69 | }; | ||
64 | 70 | ||
65 | struct atmel_tdes_dev; | 71 | struct atmel_tdes_dev; |
66 | 72 | ||
@@ -70,12 +76,19 @@ struct atmel_tdes_ctx { | |||
70 | int keylen; | 76 | int keylen; |
71 | u32 key[3*DES_KEY_SIZE / sizeof(u32)]; | 77 | u32 key[3*DES_KEY_SIZE / sizeof(u32)]; |
72 | unsigned long flags; | 78 | unsigned long flags; |
79 | |||
80 | u16 block_size; | ||
73 | }; | 81 | }; |
74 | 82 | ||
75 | struct atmel_tdes_reqctx { | 83 | struct atmel_tdes_reqctx { |
76 | unsigned long mode; | 84 | unsigned long mode; |
77 | }; | 85 | }; |
78 | 86 | ||
87 | struct atmel_tdes_dma { | ||
88 | struct dma_chan *chan; | ||
89 | struct dma_slave_config dma_conf; | ||
90 | }; | ||
91 | |||
79 | struct atmel_tdes_dev { | 92 | struct atmel_tdes_dev { |
80 | struct list_head list; | 93 | struct list_head list; |
81 | unsigned long phys_base; | 94 | unsigned long phys_base; |
@@ -99,8 +112,10 @@ struct atmel_tdes_dev { | |||
99 | size_t total; | 112 | size_t total; |
100 | 113 | ||
101 | struct scatterlist *in_sg; | 114 | struct scatterlist *in_sg; |
115 | unsigned int nb_in_sg; | ||
102 | size_t in_offset; | 116 | size_t in_offset; |
103 | struct scatterlist *out_sg; | 117 | struct scatterlist *out_sg; |
118 | unsigned int nb_out_sg; | ||
104 | size_t out_offset; | 119 | size_t out_offset; |
105 | 120 | ||
106 | size_t buflen; | 121 | size_t buflen; |
@@ -109,10 +124,16 @@ struct atmel_tdes_dev { | |||
109 | void *buf_in; | 124 | void *buf_in; |
110 | int dma_in; | 125 | int dma_in; |
111 | dma_addr_t dma_addr_in; | 126 | dma_addr_t dma_addr_in; |
127 | struct atmel_tdes_dma dma_lch_in; | ||
112 | 128 | ||
113 | void *buf_out; | 129 | void *buf_out; |
114 | int dma_out; | 130 | int dma_out; |
115 | dma_addr_t dma_addr_out; | 131 | dma_addr_t dma_addr_out; |
132 | struct atmel_tdes_dma dma_lch_out; | ||
133 | |||
134 | struct atmel_tdes_caps caps; | ||
135 | |||
136 | u32 hw_version; | ||
116 | }; | 137 | }; |
117 | 138 | ||
118 | struct atmel_tdes_drv { | 139 | struct atmel_tdes_drv { |
@@ -207,6 +228,31 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd) | |||
207 | return 0; | 228 | return 0; |
208 | } | 229 | } |
209 | 230 | ||
231 | static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd) | ||
232 | { | ||
233 | return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff; | ||
234 | } | ||
235 | |||
236 | static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd) | ||
237 | { | ||
238 | atmel_tdes_hw_init(dd); | ||
239 | |||
240 | dd->hw_version = atmel_tdes_get_version(dd); | ||
241 | |||
242 | dev_info(dd->dev, | ||
243 | "version: 0x%x\n", dd->hw_version); | ||
244 | |||
245 | clk_disable_unprepare(dd->iclk); | ||
246 | } | ||
247 | |||
248 | static void atmel_tdes_dma_callback(void *data) | ||
249 | { | ||
250 | struct atmel_tdes_dev *dd = data; | ||
251 | |||
252 | /* dma_lch_out - completed */ | ||
253 | tasklet_schedule(&dd->done_task); | ||
254 | } | ||
255 | |||
210 | static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) | 256 | static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) |
211 | { | 257 | { |
212 | int err; | 258 | int err; |
@@ -217,7 +263,9 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) | |||
217 | if (err) | 263 | if (err) |
218 | return err; | 264 | return err; |
219 | 265 | ||
220 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); | 266 | if (!dd->caps.has_dma) |
267 | atmel_tdes_write(dd, TDES_PTCR, | ||
268 | TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS); | ||
221 | 269 | ||
222 | /* MR register must be set before IV registers */ | 270 | /* MR register must be set before IV registers */ |
223 | if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) { | 271 | if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) { |
@@ -241,6 +289,8 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) | |||
241 | valmr |= TDES_MR_CFBS_16b; | 289 | valmr |= TDES_MR_CFBS_16b; |
242 | else if (dd->flags & TDES_FLAGS_CFB32) | 290 | else if (dd->flags & TDES_FLAGS_CFB32) |
243 | valmr |= TDES_MR_CFBS_32b; | 291 | valmr |= TDES_MR_CFBS_32b; |
292 | else if (dd->flags & TDES_FLAGS_CFB64) | ||
293 | valmr |= TDES_MR_CFBS_64b; | ||
244 | } else if (dd->flags & TDES_FLAGS_OFB) { | 294 | } else if (dd->flags & TDES_FLAGS_OFB) { |
245 | valmr |= TDES_MR_OPMOD_OFB; | 295 | valmr |= TDES_MR_OPMOD_OFB; |
246 | } | 296 | } |
@@ -262,7 +312,7 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) | |||
262 | return 0; | 312 | return 0; |
263 | } | 313 | } |
264 | 314 | ||
265 | static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd) | 315 | static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd) |
266 | { | 316 | { |
267 | int err = 0; | 317 | int err = 0; |
268 | size_t count; | 318 | size_t count; |
@@ -288,7 +338,7 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd) | |||
288 | return err; | 338 | return err; |
289 | } | 339 | } |
290 | 340 | ||
291 | static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd) | 341 | static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd) |
292 | { | 342 | { |
293 | int err = -ENOMEM; | 343 | int err = -ENOMEM; |
294 | 344 | ||
@@ -333,7 +383,7 @@ err_alloc: | |||
333 | return err; | 383 | return err; |
334 | } | 384 | } |
335 | 385 | ||
336 | static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) | 386 | static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd) |
337 | { | 387 | { |
338 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | 388 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, |
339 | DMA_FROM_DEVICE); | 389 | DMA_FROM_DEVICE); |
@@ -343,7 +393,7 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) | |||
343 | free_page((unsigned long)dd->buf_in); | 393 | free_page((unsigned long)dd->buf_in); |
344 | } | 394 | } |
345 | 395 | ||
346 | static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, | 396 | static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, |
347 | dma_addr_t dma_addr_out, int length) | 397 | dma_addr_t dma_addr_out, int length) |
348 | { | 398 | { |
349 | struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); | 399 | struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); |
@@ -379,7 +429,76 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, | |||
379 | return 0; | 429 | return 0; |
380 | } | 430 | } |
381 | 431 | ||
382 | static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd) | 432 | static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, |
433 | dma_addr_t dma_addr_out, int length) | ||
434 | { | ||
435 | struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
436 | struct atmel_tdes_dev *dd = ctx->dd; | ||
437 | struct scatterlist sg[2]; | ||
438 | struct dma_async_tx_descriptor *in_desc, *out_desc; | ||
439 | |||
440 | dd->dma_size = length; | ||
441 | |||
442 | if (!(dd->flags & TDES_FLAGS_FAST)) { | ||
443 | dma_sync_single_for_device(dd->dev, dma_addr_in, length, | ||
444 | DMA_TO_DEVICE); | ||
445 | } | ||
446 | |||
447 | if (dd->flags & TDES_FLAGS_CFB8) { | ||
448 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
449 | DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
450 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
451 | DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
452 | } else if (dd->flags & TDES_FLAGS_CFB16) { | ||
453 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
454 | DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
455 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
456 | DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
457 | } else { | ||
458 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
459 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
460 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
461 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
462 | } | ||
463 | |||
464 | dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); | ||
465 | dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); | ||
466 | |||
467 | dd->flags |= TDES_FLAGS_DMA; | ||
468 | |||
469 | sg_init_table(&sg[0], 1); | ||
470 | sg_dma_address(&sg[0]) = dma_addr_in; | ||
471 | sg_dma_len(&sg[0]) = length; | ||
472 | |||
473 | sg_init_table(&sg[1], 1); | ||
474 | sg_dma_address(&sg[1]) = dma_addr_out; | ||
475 | sg_dma_len(&sg[1]) = length; | ||
476 | |||
477 | in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0], | ||
478 | 1, DMA_MEM_TO_DEV, | ||
479 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
480 | if (!in_desc) | ||
481 | return -EINVAL; | ||
482 | |||
483 | out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1], | ||
484 | 1, DMA_DEV_TO_MEM, | ||
485 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
486 | if (!out_desc) | ||
487 | return -EINVAL; | ||
488 | |||
489 | out_desc->callback = atmel_tdes_dma_callback; | ||
490 | out_desc->callback_param = dd; | ||
491 | |||
492 | dmaengine_submit(out_desc); | ||
493 | dma_async_issue_pending(dd->dma_lch_out.chan); | ||
494 | |||
495 | dmaengine_submit(in_desc); | ||
496 | dma_async_issue_pending(dd->dma_lch_in.chan); | ||
497 | |||
498 | return 0; | ||
499 | } | ||
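The DMA bus width chosen above has to match the CFB block size so that each beat carries exactly one cipher block; folded as a helper (sketch):

static enum dma_slave_buswidth atmel_tdes_buswidth(unsigned long flags)
{
	if (flags & TDES_FLAGS_CFB8)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	if (flags & TDES_FLAGS_CFB16)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;
	return DMA_SLAVE_BUSWIDTH_4_BYTES;	/* CFB32 and full blocks */
}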
500 | |||
501 | static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd) | ||
383 | { | 502 | { |
384 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( | 503 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( |
385 | crypto_ablkcipher_reqtfm(dd->req)); | 504 | crypto_ablkcipher_reqtfm(dd->req)); |
@@ -387,23 +506,23 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd) | |||
387 | size_t count; | 506 | size_t count; |
388 | dma_addr_t addr_in, addr_out; | 507 | dma_addr_t addr_in, addr_out; |
389 | 508 | ||
390 | if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { | 509 | if ((!dd->in_offset) && (!dd->out_offset)) { |
391 | /* check for alignment */ | 510 | /* check for alignment */ |
392 | in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); | 511 | in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) && |
393 | out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)); | 512 | IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size); |
394 | 513 | out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) && | |
514 | IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size); | ||
395 | fast = in && out; | 515 | fast = in && out; |
516 | |||
517 | if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg)) | ||
518 | fast = 0; | ||
396 | } | 519 | } |
397 | 520 | ||
521 | |||
398 | if (fast) { | 522 | if (fast) { |
399 | count = min(dd->total, sg_dma_len(dd->in_sg)); | 523 | count = min(dd->total, sg_dma_len(dd->in_sg)); |
400 | count = min(count, sg_dma_len(dd->out_sg)); | 524 | count = min(count, sg_dma_len(dd->out_sg)); |
401 | 525 | ||
402 | if (count != dd->total) { | ||
403 | pr_err("request length != buffer length\n"); | ||
404 | return -EINVAL; | ||
405 | } | ||
406 | |||
407 | err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | 526 | err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); |
408 | if (!err) { | 527 | if (!err) { |
409 | dev_err(dd->dev, "dma_map_sg() error\n"); | 528 | dev_err(dd->dev, "dma_map_sg() error\n"); |
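The fast path now additionally requires block-size-aligned scatterlist lengths and equal in/out DMA lengths, not just word-aligned offsets; as a predicate (sketch of the checks above):

static bool atmel_tdes_sg_fast(struct scatterlist *in,
			       struct scatterlist *out, u16 block_size)
{
	return IS_ALIGNED((u32)in->offset, sizeof(u32)) &&
	       IS_ALIGNED(in->length, block_size) &&
	       IS_ALIGNED((u32)out->offset, sizeof(u32)) &&
	       IS_ALIGNED(out->length, block_size) &&
	       sg_dma_len(in) == sg_dma_len(out);
}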
@@ -433,13 +552,16 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd) | |||
433 | addr_out = dd->dma_addr_out; | 552 | addr_out = dd->dma_addr_out; |
434 | 553 | ||
435 | dd->flags &= ~TDES_FLAGS_FAST; | 554 | dd->flags &= ~TDES_FLAGS_FAST; |
436 | |||
437 | } | 555 | } |
438 | 556 | ||
439 | dd->total -= count; | 557 | dd->total -= count; |
440 | 558 | ||
441 | err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count); | 559 | if (dd->caps.has_dma) |
442 | if (err) { | 560 | err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count); |
561 | else | ||
562 | err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count); | ||
563 | |||
564 | if (err && (dd->flags & TDES_FLAGS_FAST)) { | ||
443 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | 565 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); |
444 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); | 566 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); |
445 | } | 567 | } |
@@ -447,7 +569,6 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd) | |||
447 | return err; | 569 | return err; |
448 | } | 570 | } |
449 | 571 | ||
450 | |||
451 | static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err) | 572 | static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err) |
452 | { | 573 | { |
453 | struct ablkcipher_request *req = dd->req; | 574 | struct ablkcipher_request *req = dd->req; |
@@ -506,7 +627,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd, | |||
506 | 627 | ||
507 | err = atmel_tdes_write_ctrl(dd); | 628 | err = atmel_tdes_write_ctrl(dd); |
508 | if (!err) | 629 | if (!err) |
509 | err = atmel_tdes_crypt_dma_start(dd); | 630 | err = atmel_tdes_crypt_start(dd); |
510 | if (err) { | 631 | if (err) { |
511 | /* des_task will not finish it, so do it here */ | 632 | /* des_task will not finish it, so do it here */ |
512 | atmel_tdes_finish_req(dd, err); | 633 | atmel_tdes_finish_req(dd, err); |
@@ -516,41 +637,145 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd, | |||
516 | return ret; | 637 | return ret; |
517 | } | 638 | } |
518 | 639 | ||
640 | static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd) | ||
641 | { | ||
642 | int err = -EINVAL; | ||
643 | size_t count; | ||
644 | |||
645 | if (dd->flags & TDES_FLAGS_DMA) { | ||
646 | err = 0; | ||
647 | if (dd->flags & TDES_FLAGS_FAST) { | ||
648 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
649 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
650 | } else { | ||
651 | dma_sync_single_for_device(dd->dev, dd->dma_addr_out, | ||
652 | dd->dma_size, DMA_FROM_DEVICE); | ||
653 | |||
654 | /* copy data */ | ||
655 | count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset, | ||
656 | dd->buf_out, dd->buflen, dd->dma_size, 1); | ||
657 | if (count != dd->dma_size) { | ||
658 | err = -EINVAL; | ||
659 | pr_err("not all data converted: %u\n", count); | ||
660 | } | ||
661 | } | ||
662 | } | ||
663 | return err; | ||
664 | } | ||
519 | 665 | ||
520 | static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode) | 666 | static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode) |
521 | { | 667 | { |
522 | struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx( | 668 | struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx( |
523 | crypto_ablkcipher_reqtfm(req)); | 669 | crypto_ablkcipher_reqtfm(req)); |
524 | struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req); | 670 | struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req); |
525 | struct atmel_tdes_dev *dd; | ||
526 | 671 | ||
527 | if (mode & TDES_FLAGS_CFB8) { | 672 | if (mode & TDES_FLAGS_CFB8) { |
528 | if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { | 673 | if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { |
529 | pr_err("request size is not exact amount of CFB8 blocks\n"); | 674 | pr_err("request size is not exact amount of CFB8 blocks\n"); |
530 | return -EINVAL; | 675 | return -EINVAL; |
531 | } | 676 | } |
677 | ctx->block_size = CFB8_BLOCK_SIZE; | ||
532 | } else if (mode & TDES_FLAGS_CFB16) { | 678 | } else if (mode & TDES_FLAGS_CFB16) { |
533 | if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { | 679 | if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { |
534 | pr_err("request size is not exact amount of CFB16 blocks\n"); | 680 | pr_err("request size is not exact amount of CFB16 blocks\n"); |
535 | return -EINVAL; | 681 | return -EINVAL; |
536 | } | 682 | } |
683 | ctx->block_size = CFB16_BLOCK_SIZE; | ||
537 | } else if (mode & TDES_FLAGS_CFB32) { | 684 | } else if (mode & TDES_FLAGS_CFB32) { |
538 | if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { | 685 | if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { |
539 | pr_err("request size is not exact amount of CFB32 blocks\n"); | 686 | pr_err("request size is not exact amount of CFB32 blocks\n"); |
540 | return -EINVAL; | 687 | return -EINVAL; |
541 | } | 688 | } |
542 | } else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) { | 689 | ctx->block_size = CFB32_BLOCK_SIZE; |
543 | pr_err("request size is not exact amount of DES blocks\n"); | 690 | } else { |
544 | return -EINVAL; | 691 | if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) { |
692 | pr_err("request size is not exact amount of DES blocks\n"); | ||
693 | return -EINVAL; | ||
694 | } | ||
695 | ctx->block_size = DES_BLOCK_SIZE; | ||
545 | } | 696 | } |
546 | 697 | ||
547 | dd = atmel_tdes_find_dev(ctx); | 698 | rctx->mode = mode; |
548 | if (!dd) | 699 | |
700 | return atmel_tdes_handle_queue(ctx->dd, req); | ||
701 | } | ||
702 | |||
703 | static bool atmel_tdes_filter(struct dma_chan *chan, void *slave) | ||
704 | { | ||
705 | struct at_dma_slave *sl = slave; | ||
706 | |||
707 | if (sl && sl->dma_dev == chan->device->dev) { | ||
708 | chan->private = sl; | ||
709 | return true; | ||
710 | } else { | ||
711 | return false; | ||
712 | } | ||
713 | } | ||
714 | |||
715 | static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd, | ||
716 | struct crypto_platform_data *pdata) | ||
717 | { | ||
718 | int err = -ENOMEM; | ||
719 | dma_cap_mask_t mask_in, mask_out; | ||
720 | |||
721 | if (pdata && pdata->dma_slave->txdata.dma_dev && | ||
722 | pdata->dma_slave->rxdata.dma_dev) { | ||
723 | |||
724 | /* Try to grab 2 DMA channels */ | ||
725 | dma_cap_zero(mask_in); | ||
726 | dma_cap_set(DMA_SLAVE, mask_in); | ||
727 | |||
728 | dd->dma_lch_in.chan = dma_request_channel(mask_in, | ||
729 | atmel_tdes_filter, &pdata->dma_slave->rxdata); | ||
730 | |||
731 | if (!dd->dma_lch_in.chan) | ||
732 | goto err_dma_in; | ||
733 | |||
734 | dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; | ||
735 | dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + | ||
736 | TDES_IDATA1R; | ||
737 | dd->dma_lch_in.dma_conf.src_maxburst = 1; | ||
738 | dd->dma_lch_in.dma_conf.src_addr_width = | ||
739 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
740 | dd->dma_lch_in.dma_conf.dst_maxburst = 1; | ||
741 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
742 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
743 | dd->dma_lch_in.dma_conf.device_fc = false; | ||
744 | |||
745 | dma_cap_zero(mask_out); | ||
746 | dma_cap_set(DMA_SLAVE, mask_out); | ||
747 | dd->dma_lch_out.chan = dma_request_channel(mask_out, | ||
748 | atmel_tdes_filter, &pdata->dma_slave->txdata); | ||
749 | |||
750 | if (!dd->dma_lch_out.chan) | ||
751 | goto err_dma_out; | ||
752 | |||
753 | dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; | ||
754 | dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + | ||
755 | TDES_ODATA1R; | ||
756 | dd->dma_lch_out.dma_conf.src_maxburst = 1; | ||
757 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
758 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
759 | dd->dma_lch_out.dma_conf.dst_maxburst = 1; | ||
760 | dd->dma_lch_out.dma_conf.dst_addr_width = | ||
761 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
762 | dd->dma_lch_out.dma_conf.device_fc = false; | ||
763 | |||
764 | return 0; | ||
765 | } else { | ||
549 | return -ENODEV; | 766 | return -ENODEV; |
767 | } | ||
550 | 768 | ||
551 | rctx->mode = mode; | 769 | err_dma_out: |
770 | dma_release_channel(dd->dma_lch_in.chan); | ||
771 | err_dma_in: | ||
772 | return err; | ||
773 | } | ||
552 | 774 | ||
553 | return atmel_tdes_handle_queue(dd, req); | 775 | static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) |
776 | { | ||
777 | dma_release_channel(dd->dma_lch_in.chan); | ||
778 | dma_release_channel(dd->dma_lch_out.chan); | ||
554 | } | 779 | } |
555 | 780 | ||
556 | static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 781 | static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, |
@@ -590,7 +815,8 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
590 | /* | 815 | /* |
591 | * HW bug in cfb 3-keys mode. | 816 | * HW bug in cfb 3-keys mode. |
592 | */ | 817 | */ |
593 | if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) { | 818 | if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb") |
819 | && (keylen != 2*DES_KEY_SIZE)) { | ||
594 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 820 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
595 | return -EINVAL; | 821 | return -EINVAL; |
596 | } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) { | 822 | } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) { |
@@ -678,8 +904,15 @@ static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req) | |||
678 | 904 | ||
679 | static int atmel_tdes_cra_init(struct crypto_tfm *tfm) | 905 | static int atmel_tdes_cra_init(struct crypto_tfm *tfm) |
680 | { | 906 | { |
907 | struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
908 | struct atmel_tdes_dev *dd; | ||
909 | |||
681 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx); | 910 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx); |
682 | 911 | ||
912 | dd = atmel_tdes_find_dev(ctx); | ||
913 | if (!dd) | ||
914 | return -ENODEV; | ||
915 | |||
683 | return 0; | 916 | return 0; |
684 | } | 917 | } |
685 | 918 | ||
@@ -695,7 +928,7 @@ static struct crypto_alg tdes_algs[] = { | |||
695 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 928 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
696 | .cra_blocksize = DES_BLOCK_SIZE, | 929 | .cra_blocksize = DES_BLOCK_SIZE, |
697 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 930 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
698 | .cra_alignmask = 0, | 931 | .cra_alignmask = 0x7, |
699 | .cra_type = &crypto_ablkcipher_type, | 932 | .cra_type = &crypto_ablkcipher_type, |
700 | .cra_module = THIS_MODULE, | 933 | .cra_module = THIS_MODULE, |
701 | .cra_init = atmel_tdes_cra_init, | 934 | .cra_init = atmel_tdes_cra_init, |
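The cra_alignmask changes in this hunk and the matching hunks below are systematic: 0x7 for the 8-byte DES block modes, 0x1 for CFB16, 0x3 for CFB32, i.e. mask = block size - 1. The crypto API uses the mask to ensure the buffers the driver sees start at an address whose masked low bits are clear, bouncing unaligned data if necessary. The check itself is one AND, shown here as a userspace sketch:

    #include <stdint.h>

    /* An address satisfies a crypto alignmask when its low bits are
     * zero; mask 0x7 demands 8-byte alignment (the DES block size). */
    static int satisfies_alignmask(const void *p, uintptr_t mask)
    {
    	return ((uintptr_t)p & mask) == 0;
    }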
@@ -715,7 +948,7 @@ static struct crypto_alg tdes_algs[] = { | |||
715 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 948 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
716 | .cra_blocksize = DES_BLOCK_SIZE, | 949 | .cra_blocksize = DES_BLOCK_SIZE, |
717 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 950 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
718 | .cra_alignmask = 0, | 951 | .cra_alignmask = 0x7, |
719 | .cra_type = &crypto_ablkcipher_type, | 952 | .cra_type = &crypto_ablkcipher_type, |
720 | .cra_module = THIS_MODULE, | 953 | .cra_module = THIS_MODULE, |
721 | .cra_init = atmel_tdes_cra_init, | 954 | .cra_init = atmel_tdes_cra_init, |
@@ -736,7 +969,7 @@ static struct crypto_alg tdes_algs[] = { | |||
736 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 969 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
737 | .cra_blocksize = DES_BLOCK_SIZE, | 970 | .cra_blocksize = DES_BLOCK_SIZE, |
738 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 971 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
739 | .cra_alignmask = 0, | 972 | .cra_alignmask = 0x7, |
740 | .cra_type = &crypto_ablkcipher_type, | 973 | .cra_type = &crypto_ablkcipher_type, |
741 | .cra_module = THIS_MODULE, | 974 | .cra_module = THIS_MODULE, |
742 | .cra_init = atmel_tdes_cra_init, | 975 | .cra_init = atmel_tdes_cra_init, |
@@ -778,7 +1011,7 @@ static struct crypto_alg tdes_algs[] = { | |||
778 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1011 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
779 | .cra_blocksize = CFB16_BLOCK_SIZE, | 1012 | .cra_blocksize = CFB16_BLOCK_SIZE, |
780 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1013 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
781 | .cra_alignmask = 0, | 1014 | .cra_alignmask = 0x1, |
782 | .cra_type = &crypto_ablkcipher_type, | 1015 | .cra_type = &crypto_ablkcipher_type, |
783 | .cra_module = THIS_MODULE, | 1016 | .cra_module = THIS_MODULE, |
784 | .cra_init = atmel_tdes_cra_init, | 1017 | .cra_init = atmel_tdes_cra_init, |
@@ -799,7 +1032,7 @@ static struct crypto_alg tdes_algs[] = { | |||
799 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1032 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
800 | .cra_blocksize = CFB32_BLOCK_SIZE, | 1033 | .cra_blocksize = CFB32_BLOCK_SIZE, |
801 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1034 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
802 | .cra_alignmask = 0, | 1035 | .cra_alignmask = 0x3, |
803 | .cra_type = &crypto_ablkcipher_type, | 1036 | .cra_type = &crypto_ablkcipher_type, |
804 | .cra_module = THIS_MODULE, | 1037 | .cra_module = THIS_MODULE, |
805 | .cra_init = atmel_tdes_cra_init, | 1038 | .cra_init = atmel_tdes_cra_init, |
@@ -820,7 +1053,7 @@ static struct crypto_alg tdes_algs[] = { | |||
820 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1053 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
821 | .cra_blocksize = DES_BLOCK_SIZE, | 1054 | .cra_blocksize = DES_BLOCK_SIZE, |
822 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1055 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
823 | .cra_alignmask = 0, | 1056 | .cra_alignmask = 0x7, |
824 | .cra_type = &crypto_ablkcipher_type, | 1057 | .cra_type = &crypto_ablkcipher_type, |
825 | .cra_module = THIS_MODULE, | 1058 | .cra_module = THIS_MODULE, |
826 | .cra_init = atmel_tdes_cra_init, | 1059 | .cra_init = atmel_tdes_cra_init, |
@@ -841,7 +1074,7 @@ static struct crypto_alg tdes_algs[] = { | |||
841 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1074 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
842 | .cra_blocksize = DES_BLOCK_SIZE, | 1075 | .cra_blocksize = DES_BLOCK_SIZE, |
843 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1076 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
844 | .cra_alignmask = 0, | 1077 | .cra_alignmask = 0x7, |
845 | .cra_type = &crypto_ablkcipher_type, | 1078 | .cra_type = &crypto_ablkcipher_type, |
846 | .cra_module = THIS_MODULE, | 1079 | .cra_module = THIS_MODULE, |
847 | .cra_init = atmel_tdes_cra_init, | 1080 | .cra_init = atmel_tdes_cra_init, |
@@ -861,7 +1094,7 @@ static struct crypto_alg tdes_algs[] = { | |||
861 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1094 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
862 | .cra_blocksize = DES_BLOCK_SIZE, | 1095 | .cra_blocksize = DES_BLOCK_SIZE, |
863 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1096 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
864 | .cra_alignmask = 0, | 1097 | .cra_alignmask = 0x7, |
865 | .cra_type = &crypto_ablkcipher_type, | 1098 | .cra_type = &crypto_ablkcipher_type, |
866 | .cra_module = THIS_MODULE, | 1099 | .cra_module = THIS_MODULE, |
867 | .cra_init = atmel_tdes_cra_init, | 1100 | .cra_init = atmel_tdes_cra_init, |
@@ -882,7 +1115,7 @@ static struct crypto_alg tdes_algs[] = { | |||
882 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1115 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
883 | .cra_blocksize = DES_BLOCK_SIZE, | 1116 | .cra_blocksize = DES_BLOCK_SIZE, |
884 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1117 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
885 | .cra_alignmask = 0, | 1118 | .cra_alignmask = 0x7, |
886 | .cra_type = &crypto_ablkcipher_type, | 1119 | .cra_type = &crypto_ablkcipher_type, |
887 | .cra_module = THIS_MODULE, | 1120 | .cra_module = THIS_MODULE, |
888 | .cra_init = atmel_tdes_cra_init, | 1121 | .cra_init = atmel_tdes_cra_init, |
@@ -924,7 +1157,7 @@ static struct crypto_alg tdes_algs[] = { | |||
924 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1157 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
925 | .cra_blocksize = CFB16_BLOCK_SIZE, | 1158 | .cra_blocksize = CFB16_BLOCK_SIZE, |
926 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1159 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
927 | .cra_alignmask = 0, | 1160 | .cra_alignmask = 0x1, |
928 | .cra_type = &crypto_ablkcipher_type, | 1161 | .cra_type = &crypto_ablkcipher_type, |
929 | .cra_module = THIS_MODULE, | 1162 | .cra_module = THIS_MODULE, |
930 | .cra_init = atmel_tdes_cra_init, | 1163 | .cra_init = atmel_tdes_cra_init, |
@@ -945,7 +1178,7 @@ static struct crypto_alg tdes_algs[] = { | |||
945 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1178 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
946 | .cra_blocksize = CFB32_BLOCK_SIZE, | 1179 | .cra_blocksize = CFB32_BLOCK_SIZE, |
947 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1180 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
948 | .cra_alignmask = 0, | 1181 | .cra_alignmask = 0x3, |
949 | .cra_type = &crypto_ablkcipher_type, | 1182 | .cra_type = &crypto_ablkcipher_type, |
950 | .cra_module = THIS_MODULE, | 1183 | .cra_module = THIS_MODULE, |
951 | .cra_init = atmel_tdes_cra_init, | 1184 | .cra_init = atmel_tdes_cra_init, |
@@ -966,7 +1199,7 @@ static struct crypto_alg tdes_algs[] = { | |||
966 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1199 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
967 | .cra_blocksize = DES_BLOCK_SIZE, | 1200 | .cra_blocksize = DES_BLOCK_SIZE, |
968 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | 1201 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), |
969 | .cra_alignmask = 0, | 1202 | .cra_alignmask = 0x7, |
970 | .cra_type = &crypto_ablkcipher_type, | 1203 | .cra_type = &crypto_ablkcipher_type, |
971 | .cra_module = THIS_MODULE, | 1204 | .cra_module = THIS_MODULE, |
972 | .cra_init = atmel_tdes_cra_init, | 1205 | .cra_init = atmel_tdes_cra_init, |
@@ -994,14 +1227,24 @@ static void atmel_tdes_done_task(unsigned long data) | |||
994 | struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data; | 1227 | struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data; |
995 | int err; | 1228 | int err; |
996 | 1229 | ||
997 | err = atmel_tdes_crypt_dma_stop(dd); | 1230 | if (!(dd->flags & TDES_FLAGS_DMA)) |
1231 | err = atmel_tdes_crypt_pdc_stop(dd); | ||
1232 | else | ||
1233 | err = atmel_tdes_crypt_dma_stop(dd); | ||
998 | 1234 | ||
999 | err = dd->err ? : err; | 1235 | err = dd->err ? : err; |
1000 | 1236 | ||
1001 | if (dd->total && !err) { | 1237 | if (dd->total && !err) { |
1002 | err = atmel_tdes_crypt_dma_start(dd); | 1238 | if (dd->flags & TDES_FLAGS_FAST) { |
1239 | dd->in_sg = sg_next(dd->in_sg); | ||
1240 | dd->out_sg = sg_next(dd->out_sg); | ||
1241 | if (!dd->in_sg || !dd->out_sg) | ||
1242 | err = -EINVAL; | ||
1243 | } | ||
1003 | if (!err) | 1244 | if (!err) |
1004 | return; | 1245 | err = atmel_tdes_crypt_start(dd); |
1246 | if (!err) | ||
1247 | return; /* DMA started. Not finishing. */ | ||
1005 | } | 1248 | } |
1006 | 1249 | ||
1007 | atmel_tdes_finish_req(dd, err); | 1250 | atmel_tdes_finish_req(dd, err); |
@@ -1053,9 +1296,31 @@ err_tdes_algs: | |||
1053 | return err; | 1296 | return err; |
1054 | } | 1297 | } |
1055 | 1298 | ||
1299 | static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd) | ||
1300 | { | ||
1301 | |||
1302 | dd->caps.has_dma = 0; | ||
1303 | dd->caps.has_cfb_3keys = 0; | ||
1304 | |||
1305 | /* keep only major version number */ | ||
1306 | switch (dd->hw_version & 0xf00) { | ||
1307 | case 0x700: | ||
1308 | dd->caps.has_dma = 1; | ||
1309 | dd->caps.has_cfb_3keys = 1; | ||
1310 | break; | ||
1311 | case 0x600: | ||
1312 | break; | ||
1313 | default: | ||
1314 | dev_warn(dd->dev, | ||
1315 | "Unmanaged tdes version, set minimum capabilities\n"); | ||
1316 | break; | ||
1317 | } | ||
1318 | } | ||
1319 | |||
1056 | static int atmel_tdes_probe(struct platform_device *pdev) | 1320 | static int atmel_tdes_probe(struct platform_device *pdev) |
1057 | { | 1321 | { |
1058 | struct atmel_tdes_dev *tdes_dd; | 1322 | struct atmel_tdes_dev *tdes_dd; |
1323 | struct crypto_platform_data *pdata; | ||
1059 | struct device *dev = &pdev->dev; | 1324 | struct device *dev = &pdev->dev; |
1060 | struct resource *tdes_res; | 1325 | struct resource *tdes_res; |
1061 | unsigned long tdes_phys_size; | 1326 | unsigned long tdes_phys_size; |
@@ -1109,7 +1374,7 @@ static int atmel_tdes_probe(struct platform_device *pdev) | |||
1109 | } | 1374 | } |
1110 | 1375 | ||
1111 | /* Initializing the clock */ | 1376 | /* Initializing the clock */ |
1112 | tdes_dd->iclk = clk_get(&pdev->dev, NULL); | 1377 | tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk"); |
1113 | if (IS_ERR(tdes_dd->iclk)) { | 1378 | if (IS_ERR(tdes_dd->iclk)) { |
1114 | dev_err(dev, "clock initialization failed.\n"); | 1379 | dev_err(dev, "clock initialization failed.\n"); |
1115 | err = PTR_ERR(tdes_dd->iclk); | 1380 | err = PTR_ERR(tdes_dd->iclk); |
@@ -1123,9 +1388,25 @@ static int atmel_tdes_probe(struct platform_device *pdev) | |||
1123 | goto tdes_io_err; | 1388 | goto tdes_io_err; |
1124 | } | 1389 | } |
1125 | 1390 | ||
1126 | err = atmel_tdes_dma_init(tdes_dd); | 1391 | atmel_tdes_hw_version_init(tdes_dd); |
1392 | |||
1393 | atmel_tdes_get_cap(tdes_dd); | ||
1394 | |||
1395 | err = atmel_tdes_buff_init(tdes_dd); | ||
1127 | if (err) | 1396 | if (err) |
1128 | goto err_tdes_dma; | 1397 | goto err_tdes_buff; |
1398 | |||
1399 | if (tdes_dd->caps.has_dma) { | ||
1400 | pdata = pdev->dev.platform_data; | ||
1401 | if (!pdata) { | ||
1402 | dev_err(&pdev->dev, "platform data not available\n"); | ||
1403 | err = -ENXIO; | ||
1404 | goto err_pdata; | ||
1405 | } | ||
1406 | err = atmel_tdes_dma_init(tdes_dd, pdata); | ||
1407 | if (err) | ||
1408 | goto err_tdes_dma; | ||
1409 | } | ||
1129 | 1410 | ||
1130 | spin_lock(&atmel_tdes.lock); | 1411 | spin_lock(&atmel_tdes.lock); |
1131 | list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list); | 1412 | list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list); |
@@ -1143,8 +1424,12 @@ err_algs: | |||
1143 | spin_lock(&atmel_tdes.lock); | 1424 | spin_lock(&atmel_tdes.lock); |
1144 | list_del(&tdes_dd->list); | 1425 | list_del(&tdes_dd->list); |
1145 | spin_unlock(&atmel_tdes.lock); | 1426 | spin_unlock(&atmel_tdes.lock); |
1146 | atmel_tdes_dma_cleanup(tdes_dd); | 1427 | if (tdes_dd->caps.has_dma) |
1428 | atmel_tdes_dma_cleanup(tdes_dd); | ||
1147 | err_tdes_dma: | 1429 | err_tdes_dma: |
1430 | err_pdata: | ||
1431 | atmel_tdes_buff_cleanup(tdes_dd); | ||
1432 | err_tdes_buff: | ||
1148 | iounmap(tdes_dd->io_base); | 1433 | iounmap(tdes_dd->io_base); |
1149 | tdes_io_err: | 1434 | tdes_io_err: |
1150 | clk_put(tdes_dd->iclk); | 1435 | clk_put(tdes_dd->iclk); |
@@ -1178,7 +1463,10 @@ static int atmel_tdes_remove(struct platform_device *pdev) | |||
1178 | tasklet_kill(&tdes_dd->done_task); | 1463 | tasklet_kill(&tdes_dd->done_task); |
1179 | tasklet_kill(&tdes_dd->queue_task); | 1464 | tasklet_kill(&tdes_dd->queue_task); |
1180 | 1465 | ||
1181 | atmel_tdes_dma_cleanup(tdes_dd); | 1466 | if (tdes_dd->caps.has_dma) |
1467 | atmel_tdes_dma_cleanup(tdes_dd); | ||
1468 | |||
1469 | atmel_tdes_buff_cleanup(tdes_dd); | ||
1182 | 1470 | ||
1183 | iounmap(tdes_dd->io_base); | 1471 | iounmap(tdes_dd->io_base); |
1184 | 1472 | ||
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c index 827913d7d33a..d797f31f5d85 100644 --- a/drivers/crypto/bfin_crc.c +++ b/drivers/crypto/bfin_crc.c | |||
@@ -151,7 +151,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req) | |||
151 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | 151 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); |
152 | struct bfin_crypto_crc *crc; | 152 | struct bfin_crypto_crc *crc; |
153 | 153 | ||
154 | dev_dbg(crc->dev, "crc_init\n"); | 154 | dev_dbg(ctx->crc->dev, "crc_init\n"); |
155 | spin_lock_bh(&crc_list.lock); | 155 | spin_lock_bh(&crc_list.lock); |
156 | list_for_each_entry(crc, &crc_list.dev_list, list) { | 156 | list_for_each_entry(crc, &crc_list.dev_list, list) { |
157 | crc_ctx->crc = crc; | 157 | crc_ctx->crc = crc; |
@@ -160,7 +160,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req) | |||
160 | spin_unlock_bh(&crc_list.lock); | 160 | spin_unlock_bh(&crc_list.lock); |
161 | 161 | ||
162 | if (sg_count(req->src) > CRC_MAX_DMA_DESC) { | 162 | if (sg_count(req->src) > CRC_MAX_DMA_DESC) { |
163 | dev_dbg(crc->dev, "init: requested sg list is too big > %d\n", | 163 | dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n", |
164 | CRC_MAX_DMA_DESC); | 164 | CRC_MAX_DMA_DESC); |
165 | return -EINVAL; | 165 | return -EINVAL; |
166 | } | 166 | } |
@@ -175,7 +175,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req) | |||
175 | /* init crc results */ | 175 | /* init crc results */ |
176 | put_unaligned_le32(crc_ctx->key, req->result); | 176 | put_unaligned_le32(crc_ctx->key, req->result); |
177 | 177 | ||
178 | dev_dbg(crc->dev, "init: digest size: %d\n", | 178 | dev_dbg(ctx->crc->dev, "init: digest size: %d\n", |
179 | crypto_ahash_digestsize(tfm)); | 179 | crypto_ahash_digestsize(tfm)); |
180 | 180 | ||
181 | return bfin_crypto_crc_init_hw(crc, crc_ctx->key); | 181 | return bfin_crypto_crc_init_hw(crc, crc_ctx->key); |
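All three bfin_crc.c fixes address the same bug: the dev_dbg() calls dereferenced crc, the list_for_each_entry() cursor, before the loop ever assigned it, and the fix reads the device through the request context instead. The pattern, distilled with illustrative names:

    #include <linux/device.h>
    #include <linux/list.h>

    struct dev_entry {
    	struct list_head list;
    	struct device *dev;
    };

    static void show_first(struct list_head *devs)
    {
    	struct dev_entry *d;	/* loop cursor, not yet valid */

    	/* BUG if placed here: d is uninitialized until the walk
    	 * below assigns it, so d->dev reads garbage:
    	 *   dev_dbg(d->dev, "too early\n");
    	 */
    	list_for_each_entry(d, devs, list) {
    		dev_dbg(d->dev, "first entry\n"); /* d valid here */
    		break;
    	}
    }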
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 65c7668614ab..b44091c47f75 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
@@ -78,7 +78,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API | |||
78 | tristate "Register hash algorithm implementations with Crypto API" | 78 | tristate "Register hash algorithm implementations with Crypto API" |
79 | depends on CRYPTO_DEV_FSL_CAAM | 79 | depends on CRYPTO_DEV_FSL_CAAM |
80 | default y | 80 | default y |
81 | select CRYPTO_AHASH | 81 | select CRYPTO_HASH |
82 | help | 82 | help |
83 | Selecting this will offload ahash for users of the | 83 | Selecting this will offload ahash for users of the |
84 | scatterlist crypto API to the SEC4 via job ring. | 84 | scatterlist crypto API to the SEC4 via job ring. |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index cf268b14ae9a..765fdf5ce579 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -1693,6 +1693,7 @@ static struct caam_alg_template driver_algs[] = { | |||
1693 | .name = "authenc(hmac(sha224),cbc(aes))", | 1693 | .name = "authenc(hmac(sha224),cbc(aes))", |
1694 | .driver_name = "authenc-hmac-sha224-cbc-aes-caam", | 1694 | .driver_name = "authenc-hmac-sha224-cbc-aes-caam", |
1695 | .blocksize = AES_BLOCK_SIZE, | 1695 | .blocksize = AES_BLOCK_SIZE, |
1696 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
1696 | .template_aead = { | 1697 | .template_aead = { |
1697 | .setkey = aead_setkey, | 1698 | .setkey = aead_setkey, |
1698 | .setauthsize = aead_setauthsize, | 1699 | .setauthsize = aead_setauthsize, |
@@ -1732,6 +1733,7 @@ static struct caam_alg_template driver_algs[] = { | |||
1732 | .name = "authenc(hmac(sha384),cbc(aes))", | 1733 | .name = "authenc(hmac(sha384),cbc(aes))", |
1733 | .driver_name = "authenc-hmac-sha384-cbc-aes-caam", | 1734 | .driver_name = "authenc-hmac-sha384-cbc-aes-caam", |
1734 | .blocksize = AES_BLOCK_SIZE, | 1735 | .blocksize = AES_BLOCK_SIZE, |
1736 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
1735 | .template_aead = { | 1737 | .template_aead = { |
1736 | .setkey = aead_setkey, | 1738 | .setkey = aead_setkey, |
1737 | .setauthsize = aead_setauthsize, | 1739 | .setauthsize = aead_setauthsize, |
@@ -1810,6 +1812,7 @@ static struct caam_alg_template driver_algs[] = { | |||
1810 | .name = "authenc(hmac(sha224),cbc(des3_ede))", | 1812 | .name = "authenc(hmac(sha224),cbc(des3_ede))", |
1811 | .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam", | 1813 | .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam", |
1812 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1814 | .blocksize = DES3_EDE_BLOCK_SIZE, |
1815 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
1813 | .template_aead = { | 1816 | .template_aead = { |
1814 | .setkey = aead_setkey, | 1817 | .setkey = aead_setkey, |
1815 | .setauthsize = aead_setauthsize, | 1818 | .setauthsize = aead_setauthsize, |
@@ -1849,6 +1852,7 @@ static struct caam_alg_template driver_algs[] = { | |||
1849 | .name = "authenc(hmac(sha384),cbc(des3_ede))", | 1852 | .name = "authenc(hmac(sha384),cbc(des3_ede))", |
1850 | .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam", | 1853 | .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam", |
1851 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1854 | .blocksize = DES3_EDE_BLOCK_SIZE, |
1855 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
1852 | .template_aead = { | 1856 | .template_aead = { |
1853 | .setkey = aead_setkey, | 1857 | .setkey = aead_setkey, |
1854 | .setauthsize = aead_setauthsize, | 1858 | .setauthsize = aead_setauthsize, |
@@ -1926,6 +1930,7 @@ static struct caam_alg_template driver_algs[] = { | |||
1926 | .name = "authenc(hmac(sha224),cbc(des))", | 1930 | .name = "authenc(hmac(sha224),cbc(des))", |
1927 | .driver_name = "authenc-hmac-sha224-cbc-des-caam", | 1931 | .driver_name = "authenc-hmac-sha224-cbc-des-caam", |
1928 | .blocksize = DES_BLOCK_SIZE, | 1932 | .blocksize = DES_BLOCK_SIZE, |
1933 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
1929 | .template_aead = { | 1934 | .template_aead = { |
1930 | .setkey = aead_setkey, | 1935 | .setkey = aead_setkey, |
1931 | .setauthsize = aead_setauthsize, | 1936 | .setauthsize = aead_setauthsize, |
@@ -1965,6 +1970,7 @@ static struct caam_alg_template driver_algs[] = { | |||
1965 | .name = "authenc(hmac(sha384),cbc(des))", | 1970 | .name = "authenc(hmac(sha384),cbc(des))", |
1966 | .driver_name = "authenc-hmac-sha384-cbc-des-caam", | 1971 | .driver_name = "authenc-hmac-sha384-cbc-des-caam", |
1967 | .blocksize = DES_BLOCK_SIZE, | 1972 | .blocksize = DES_BLOCK_SIZE, |
1973 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
1968 | .template_aead = { | 1974 | .template_aead = { |
1969 | .setkey = aead_setkey, | 1975 | .setkey = aead_setkey, |
1970 | .setauthsize = aead_setauthsize, | 1976 | .setauthsize = aead_setauthsize, |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 32aba7a61503..5996521a1caf 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -411,7 +411,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
411 | return 0; | 411 | return 0; |
412 | } | 412 | } |
413 | 413 | ||
414 | static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, | 414 | static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, |
415 | u32 keylen) | 415 | u32 keylen) |
416 | { | 416 | { |
417 | return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, | 417 | return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, |
@@ -420,7 +420,7 @@ static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, | |||
420 | } | 420 | } |
421 | 421 | ||
422 | /* Digest hash size if it is too large */ | 422 | /* Digest hash size if it is too large */ |
423 | static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | 423 | static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, |
424 | u32 *keylen, u8 *key_out, u32 digestsize) | 424 | u32 *keylen, u8 *key_out, u32 digestsize) |
425 | { | 425 | { |
426 | struct device *jrdev = ctx->jrdev; | 426 | struct device *jrdev = ctx->jrdev; |
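gen_split_hash_key() and hash_digest_key() now return int rather than u32, matching the kernel's negative-errno convention: an error code stored in an unsigned type compares as a huge positive number, so a sign check at the caller silently never fires. A userspace illustration, with errno.h standing in for the kernel's error constants:

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned int ret_u32 = (unsigned int)-EINVAL; /* 0xffffffea */
    	int ret_int = -EINVAL;

    	/* An "is it an error?" sign test only works on the signed copy. */
    	printf("signed:   ret < 0 -> %d\n", ret_int < 0);        /* 1 */
    	printf("unsigned: ret is  -> %u (never < 0)\n", ret_u32);
    	return 0;
    }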
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 8acf00490fd5..6e94bcd94678 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -304,6 +304,9 @@ static int caam_probe(struct platform_device *pdev) | |||
304 | caam_remove(pdev); | 304 | caam_remove(pdev); |
305 | return ret; | 305 | return ret; |
306 | } | 306 | } |
307 | |||
308 | /* Enable RDB bit so that RNG works faster */ | ||
309 | setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE); | ||
307 | } | 310 | } |
308 | 311 | ||
309 | /* NOTE: RTIC detection ought to go here, around Si time */ | 312 | /* NOTE: RTIC detection ought to go here, around Si time */ |
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 30b8f74833d4..9f25f5296029 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
@@ -36,7 +36,7 @@ static void report_jump_idx(u32 status, char *outstr) | |||
36 | 36 | ||
37 | static void report_ccb_status(u32 status, char *outstr) | 37 | static void report_ccb_status(u32 status, char *outstr) |
38 | { | 38 | { |
39 | char *cha_id_list[] = { | 39 | static const char * const cha_id_list[] = { |
40 | "", | 40 | "", |
41 | "AES", | 41 | "AES", |
42 | "DES", | 42 | "DES", |
@@ -51,7 +51,7 @@ static void report_ccb_status(u32 status, char *outstr) | |||
51 | "ZUCE", | 51 | "ZUCE", |
52 | "ZUCA", | 52 | "ZUCA", |
53 | }; | 53 | }; |
54 | char *err_id_list[] = { | 54 | static const char * const err_id_list[] = { |
55 | "No error.", | 55 | "No error.", |
56 | "Mode error.", | 56 | "Mode error.", |
57 | "Data size error.", | 57 | "Data size error.", |
@@ -69,7 +69,7 @@ static void report_ccb_status(u32 status, char *outstr) | |||
69 | "Invalid CHA combination was selected", | 69 | "Invalid CHA combination was selected", |
70 | "Invalid CHA selected.", | 70 | "Invalid CHA selected.", |
71 | }; | 71 | }; |
72 | char *rng_err_id_list[] = { | 72 | static const char * const rng_err_id_list[] = { |
73 | "", | 73 | "", |
74 | "", | 74 | "", |
75 | "", | 75 | "", |
@@ -117,7 +117,7 @@ static void report_jump_status(u32 status, char *outstr) | |||
117 | 117 | ||
118 | static void report_deco_status(u32 status, char *outstr) | 118 | static void report_deco_status(u32 status, char *outstr) |
119 | { | 119 | { |
120 | const struct { | 120 | static const struct { |
121 | u8 value; | 121 | u8 value; |
122 | char *error_text; | 122 | char *error_text; |
123 | } desc_error_list[] = { | 123 | } desc_error_list[] = { |
@@ -245,7 +245,7 @@ static void report_cond_code_status(u32 status, char *outstr) | |||
245 | 245 | ||
246 | char *caam_jr_strstatus(char *outstr, u32 status) | 246 | char *caam_jr_strstatus(char *outstr, u32 status) |
247 | { | 247 | { |
248 | struct stat_src { | 248 | static const struct stat_src { |
249 | void (*report_ssed)(u32 status, char *outstr); | 249 | void (*report_ssed)(u32 status, char *outstr); |
250 | char *error; | 250 | char *error; |
251 | } status_src[] = { | 251 | } status_src[] = { |
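The error.c hunks turn on-stack string tables into static const data. Without static, the compiler must rebuild the whole pointer array on the stack on every call; with static const ... * const, both the pointer array and the strings it references live in read-only data and are emitted once. In miniature:

    #include <stdio.h>

    static void report(unsigned int idx)
    {
    	/* Emitted once into .rodata; neither the array slots nor the
    	 * strings they point to are writable through this name. */
    	static const char * const cha_ids[] = { "", "AES", "DES" };

    	if (idx < sizeof(cha_ids) / sizeof(cha_ids[0]))
    		printf("CHA: %s\n", cha_ids[idx]);
    }

    int main(void)
    {
    	report(1);	/* prints "CHA: AES" */
    	return 0;
    }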
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 5cd4c1b268a1..e4a16b741371 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
@@ -41,6 +41,7 @@ struct caam_jrentry_info { | |||
41 | /* Private sub-storage for a single JobR */ | 41 | /* Private sub-storage for a single JobR */ |
42 | struct caam_drv_private_jr { | 42 | struct caam_drv_private_jr { |
43 | struct device *parentdev; /* points back to controller dev */ | 43 | struct device *parentdev; /* points back to controller dev */ |
44 | struct platform_device *jr_pdev;/* points to platform device for JR */ | ||
44 | int ridx; | 45 | int ridx; |
45 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | 46 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
46 | struct tasklet_struct irqtask; | 47 | struct tasklet_struct irqtask; |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 93d14070141a..b4aa773ecbc8 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -407,6 +407,7 @@ int caam_jr_shutdown(struct device *dev) | |||
407 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, | 407 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, |
408 | jrp->outring, outbusaddr); | 408 | jrp->outring, outbusaddr); |
409 | kfree(jrp->entinfo); | 409 | kfree(jrp->entinfo); |
410 | of_device_unregister(jrp->jr_pdev); | ||
410 | 411 | ||
411 | return ret; | 412 | return ret; |
412 | } | 413 | } |
@@ -454,6 +455,8 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | |||
454 | kfree(jrpriv); | 455 | kfree(jrpriv); |
455 | return -EINVAL; | 456 | return -EINVAL; |
456 | } | 457 | } |
458 | |||
459 | jrpriv->jr_pdev = jr_pdev; | ||
457 | jrdev = &jr_pdev->dev; | 460 | jrdev = &jr_pdev->dev; |
458 | dev_set_drvdata(jrdev, jrpriv); | 461 | dev_set_drvdata(jrdev, jrpriv); |
459 | ctrlpriv->jrdev[ring] = jrdev; | 462 | ctrlpriv->jrdev[ring] = jrdev; |
@@ -472,6 +475,7 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | |||
472 | /* Now do the platform independent part */ | 475 | /* Now do the platform independent part */ |
473 | error = caam_jr_init(jrdev); /* now turn on hardware */ | 476 | error = caam_jr_init(jrdev); /* now turn on hardware */ |
474 | if (error) { | 477 | if (error) { |
478 | of_device_unregister(jr_pdev); | ||
475 | kfree(jrpriv); | 479 | kfree(jrpriv); |
476 | return error; | 480 | return error; |
477 | } | 481 | } |
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index f6dba10246c3..87138d2adb5f 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c | |||
@@ -44,7 +44,7 @@ Split key generation----------------------------------------------- | |||
44 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | 44 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 |
45 | @0xffe04000 | 45 | @0xffe04000 |
46 | */ | 46 | */ |
47 | u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | 47 | int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, |
48 | int split_key_pad_len, const u8 *key_in, u32 keylen, | 48 | int split_key_pad_len, const u8 *key_in, u32 keylen, |
49 | u32 alg_op) | 49 | u32 alg_op) |
50 | { | 50 | { |
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h index d95d290c6e8b..c5588f6d8109 100644 --- a/drivers/crypto/caam/key_gen.h +++ b/drivers/crypto/caam/key_gen.h | |||
@@ -12,6 +12,6 @@ struct split_key_result { | |||
12 | 12 | ||
13 | void split_key_done(struct device *dev, u32 *desc, u32 err, void *context); | 13 | void split_key_done(struct device *dev, u32 *desc, u32 err, void *context); |
14 | 14 | ||
15 | u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | 15 | int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, |
16 | int split_key_pad_len, const u8 *key_in, u32 keylen, | 16 | int split_key_pad_len, const u8 *key_in, u32 keylen, |
17 | u32 alg_op); | 17 | u32 alg_op); |
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 3223fc6d647c..cd6fedad9935 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -252,7 +252,8 @@ struct caam_ctrl { | |||
252 | /* Read/Writable */ | 252 | /* Read/Writable */ |
253 | u32 rsvd1; | 253 | u32 rsvd1; |
254 | u32 mcr; /* MCFG Master Config Register */ | 254 | u32 mcr; /* MCFG Master Config Register */ |
255 | u32 rsvd2[2]; | 255 | u32 rsvd2; |
256 | u32 scfgr; /* SCFGR, Security Config Register */ | ||
256 | 257 | ||
257 | /* Bus Access Configuration Section 010-11f */ | 258 | /* Bus Access Configuration Section 010-11f */ |
258 | /* Read/Writable */ | 259 | /* Read/Writable */ |
@@ -299,6 +300,7 @@ struct caam_ctrl { | |||
299 | #define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */ | 300 | #define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */ |
300 | #define MCFGR_DMA_RESET 0x10000000 | 301 | #define MCFGR_DMA_RESET 0x10000000 |
301 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ | 302 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ |
303 | #define SCFGR_RDBENABLE 0x00000400 | ||
302 | 304 | ||
303 | /* AXI read cache control */ | 305 | /* AXI read cache control */ |
304 | #define MCFGR_ARCACHE_SHIFT 12 | 306 | #define MCFGR_ARCACHE_SHIFT 12 |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 6aa425fe0ed5..ee15b0f7849a 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -636,7 +636,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) | |||
636 | 636 | ||
637 | pr_debug("err: %d\n", err); | 637 | pr_debug("err: %d\n", err); |
638 | 638 | ||
639 | pm_runtime_put_sync(dd->dev); | 639 | pm_runtime_put(dd->dev); |
640 | dd->flags &= ~FLAGS_BUSY; | 640 | dd->flags &= ~FLAGS_BUSY; |
641 | 641 | ||
642 | req->base.complete(&req->base, err); | 642 | req->base.complete(&req->base, err); |
@@ -1248,18 +1248,7 @@ static struct platform_driver omap_aes_driver = { | |||
1248 | }, | 1248 | }, |
1249 | }; | 1249 | }; |
1250 | 1250 | ||
1251 | static int __init omap_aes_mod_init(void) | 1251 | module_platform_driver(omap_aes_driver); |
1252 | { | ||
1253 | return platform_driver_register(&omap_aes_driver); | ||
1254 | } | ||
1255 | |||
1256 | static void __exit omap_aes_mod_exit(void) | ||
1257 | { | ||
1258 | platform_driver_unregister(&omap_aes_driver); | ||
1259 | } | ||
1260 | |||
1261 | module_init(omap_aes_mod_init); | ||
1262 | module_exit(omap_aes_mod_exit); | ||
1263 | 1252 | ||
1264 | MODULE_DESCRIPTION("OMAP AES hw acceleration support."); | 1253 | MODULE_DESCRIPTION("OMAP AES hw acceleration support."); |
1265 | MODULE_LICENSE("GPL v2"); | 1254 | MODULE_LICENSE("GPL v2"); |
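The deleted init/exit boilerplate collapses into module_platform_driver(), which (via module_driver() in <linux/platform_device.h>) expands to roughly what was removed:

    /* Approximate expansion of module_platform_driver(omap_aes_driver): */
    static int __init omap_aes_driver_init(void)
    {
    	return platform_driver_register(&omap_aes_driver);
    }
    module_init(omap_aes_driver_init);

    static void __exit omap_aes_driver_exit(void)
    {
    	platform_driver_unregister(&omap_aes_driver);
    }
    module_exit(omap_aes_driver_exit);

The identical omap-sham.c hunk below relies on the same expansion.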
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 3d1611f5aecf..a1e1b4756ee5 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -923,7 +923,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) | |||
923 | dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | | 923 | dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | |
924 | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); | 924 | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); |
925 | 925 | ||
926 | pm_runtime_put_sync(dd->dev); | 926 | pm_runtime_put(dd->dev); |
927 | 927 | ||
928 | if (req->base.complete) | 928 | if (req->base.complete) |
929 | req->base.complete(&req->base, err); | 929 | req->base.complete(&req->base, err); |
@@ -1813,18 +1813,7 @@ static struct platform_driver omap_sham_driver = { | |||
1813 | }, | 1813 | }, |
1814 | }; | 1814 | }; |
1815 | 1815 | ||
1816 | static int __init omap_sham_mod_init(void) | 1816 | module_platform_driver(omap_sham_driver); |
1817 | { | ||
1818 | return platform_driver_register(&omap_sham_driver); | ||
1819 | } | ||
1820 | |||
1821 | static void __exit omap_sham_mod_exit(void) | ||
1822 | { | ||
1823 | platform_driver_unregister(&omap_sham_driver); | ||
1824 | } | ||
1825 | |||
1826 | module_init(omap_sham_mod_init); | ||
1827 | module_exit(omap_sham_mod_exit); | ||
1828 | 1817 | ||
1829 | MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); | 1818 | MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); |
1830 | MODULE_LICENSE("GPL v2"); | 1819 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 2096d4685a9e..ac30724d923d 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
@@ -1688,8 +1688,6 @@ static const struct of_device_id spacc_of_id_table[] = { | |||
1688 | { .compatible = "picochip,spacc-l2" }, | 1688 | { .compatible = "picochip,spacc-l2" }, |
1689 | {} | 1689 | {} |
1690 | }; | 1690 | }; |
1691 | #else /* CONFIG_OF */ | ||
1692 | #define spacc_of_id_table NULL | ||
1693 | #endif /* CONFIG_OF */ | 1691 | #endif /* CONFIG_OF */ |
1694 | 1692 | ||
1695 | static bool spacc_is_compatible(struct platform_device *pdev, | 1693 | static bool spacc_is_compatible(struct platform_device *pdev, |
@@ -1874,7 +1872,7 @@ static struct platform_driver spacc_driver = { | |||
1874 | #ifdef CONFIG_PM | 1872 | #ifdef CONFIG_PM |
1875 | .pm = &spacc_pm_ops, | 1873 | .pm = &spacc_pm_ops, |
1876 | #endif /* CONFIG_PM */ | 1874 | #endif /* CONFIG_PM */ |
1877 | .of_match_table = spacc_of_id_table, | 1875 | .of_match_table = of_match_ptr(spacc_of_id_table), |
1878 | }, | 1876 | }, |
1879 | .id_table = spacc_id_table, | 1877 | .id_table = spacc_id_table, |
1880 | }; | 1878 | }; |
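Dropping the manual "#define spacc_of_id_table NULL" stub works because of_match_ptr() already performs that substitution; its definition in <linux/of.h> is essentially:

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr)	(_ptr)
    #else
    #define of_match_ptr(_ptr)	NULL
    #endif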
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c new file mode 100644 index 000000000000..a97bb6c1596c --- /dev/null +++ b/drivers/crypto/sahara.c | |||
@@ -0,0 +1,1070 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for SAHARA cryptographic accelerator. | ||
5 | * | ||
6 | * Copyright (c) 2013 Vista Silicon S.L. | ||
7 | * Author: Javier Martin <javier.martin@vista-silicon.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * Based on omap-aes.c and tegra-aes.c | ||
14 | */ | ||
15 | |||
16 | #include <crypto/algapi.h> | ||
17 | #include <crypto/aes.h> | ||
18 | |||
19 | #include <linux/clk.h> | ||
20 | #include <linux/crypto.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/irq.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/of.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | |||
29 | #define SAHARA_NAME "sahara" | ||
30 | #define SAHARA_VERSION_3 3 | ||
31 | #define SAHARA_TIMEOUT_MS 1000 | ||
32 | #define SAHARA_MAX_HW_DESC 2 | ||
33 | #define SAHARA_MAX_HW_LINK 20 | ||
34 | |||
35 | #define FLAGS_MODE_MASK 0x000f | ||
36 | #define FLAGS_ENCRYPT BIT(0) | ||
37 | #define FLAGS_CBC BIT(1) | ||
38 | #define FLAGS_NEW_KEY BIT(3) | ||
39 | #define FLAGS_BUSY 4 | ||
40 | |||
41 | #define SAHARA_HDR_BASE 0x00800000 | ||
42 | #define SAHARA_HDR_SKHA_ALG_AES 0 | ||
43 | #define SAHARA_HDR_SKHA_OP_ENC (1 << 2) | ||
44 | #define SAHARA_HDR_SKHA_MODE_ECB (0 << 3) | ||
45 | #define SAHARA_HDR_SKHA_MODE_CBC (1 << 3) | ||
46 | #define SAHARA_HDR_FORM_DATA (5 << 16) | ||
47 | #define SAHARA_HDR_FORM_KEY (8 << 16) | ||
48 | #define SAHARA_HDR_LLO (1 << 24) | ||
49 | #define SAHARA_HDR_CHA_SKHA (1 << 28) | ||
50 | #define SAHARA_HDR_CHA_MDHA (2 << 28) | ||
51 | #define SAHARA_HDR_PARITY_BIT (1 << 31) | ||
52 | |||
53 | /* SAHARA can only process one request at a time */ | ||
54 | #define SAHARA_QUEUE_LENGTH 1 | ||
55 | |||
56 | #define SAHARA_REG_VERSION 0x00 | ||
57 | #define SAHARA_REG_DAR 0x04 | ||
58 | #define SAHARA_REG_CONTROL 0x08 | ||
59 | #define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24) | ||
60 | #define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16) | ||
61 | #define SAHARA_CONTROL_RNG_AUTORSD (1 << 7) | ||
62 | #define SAHARA_CONTROL_ENABLE_INT (1 << 4) | ||
63 | #define SAHARA_REG_CMD 0x0C | ||
64 | #define SAHARA_CMD_RESET (1 << 0) | ||
65 | #define SAHARA_CMD_CLEAR_INT (1 << 8) | ||
66 | #define SAHARA_CMD_CLEAR_ERR (1 << 9) | ||
67 | #define SAHARA_CMD_SINGLE_STEP (1 << 10) | ||
68 | #define SAHARA_CMD_MODE_BATCH (1 << 16) | ||
69 | #define SAHARA_CMD_MODE_DEBUG (1 << 18) | ||
70 | #define SAHARA_REG_STATUS 0x10 | ||
71 | #define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7) | ||
72 | #define SAHARA_STATE_IDLE 0 | ||
73 | #define SAHARA_STATE_BUSY 1 | ||
74 | #define SAHARA_STATE_ERR 2 | ||
75 | #define SAHARA_STATE_FAULT 3 | ||
76 | #define SAHARA_STATE_COMPLETE 4 | ||
77 | #define SAHARA_STATE_COMP_FLAG (1 << 2) | ||
78 | #define SAHARA_STATUS_DAR_FULL (1 << 3) | ||
79 | #define SAHARA_STATUS_ERROR (1 << 4) | ||
80 | #define SAHARA_STATUS_SECURE (1 << 5) | ||
81 | #define SAHARA_STATUS_FAIL (1 << 6) | ||
82 | #define SAHARA_STATUS_INIT (1 << 7) | ||
83 | #define SAHARA_STATUS_RNG_RESEED (1 << 8) | ||
84 | #define SAHARA_STATUS_ACTIVE_RNG (1 << 9) | ||
85 | #define SAHARA_STATUS_ACTIVE_MDHA (1 << 10) | ||
86 | #define SAHARA_STATUS_ACTIVE_SKHA (1 << 11) | ||
87 | #define SAHARA_STATUS_MODE_BATCH (1 << 16) | ||
88 | #define SAHARA_STATUS_MODE_DEDICATED (1 << 17) | ||
89 | #define SAHARA_STATUS_MODE_DEBUG (1 << 18) | ||
90 | #define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff) | ||
91 | #define SAHARA_REG_ERRSTATUS 0x14 | ||
92 | #define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf) | ||
93 | #define SAHARA_ERRSOURCE_CHA 14 | ||
94 | #define SAHARA_ERRSOURCE_DMA 15 | ||
95 | #define SAHARA_ERRSTATUS_DMA_DIR (1 << 8) | ||
96 | #define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3) | ||
97 | #define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7) | ||
98 | #define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff) | ||
99 | #define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3) | ||
100 | #define SAHARA_REG_FADDR 0x18 | ||
101 | #define SAHARA_REG_CDAR 0x1C | ||
102 | #define SAHARA_REG_IDAR 0x20 | ||
103 | |||
104 | struct sahara_hw_desc { | ||
105 | u32 hdr; | ||
106 | u32 len1; | ||
107 | dma_addr_t p1; | ||
108 | u32 len2; | ||
109 | dma_addr_t p2; | ||
110 | dma_addr_t next; | ||
111 | }; | ||
112 | |||
113 | struct sahara_hw_link { | ||
114 | u32 len; | ||
115 | dma_addr_t p; | ||
116 | dma_addr_t next; | ||
117 | }; | ||
118 | |||
119 | struct sahara_ctx { | ||
120 | struct sahara_dev *dev; | ||
121 | unsigned long flags; | ||
122 | int keylen; | ||
123 | u8 key[AES_KEYSIZE_128]; | ||
124 | struct crypto_ablkcipher *fallback; | ||
125 | }; | ||
126 | |||
127 | struct sahara_aes_reqctx { | ||
128 | unsigned long mode; | ||
129 | }; | ||
130 | |||
131 | struct sahara_dev { | ||
132 | struct device *device; | ||
133 | void __iomem *regs_base; | ||
134 | struct clk *clk_ipg; | ||
135 | struct clk *clk_ahb; | ||
136 | |||
137 | struct sahara_ctx *ctx; | ||
138 | spinlock_t lock; | ||
139 | struct crypto_queue queue; | ||
140 | unsigned long flags; | ||
141 | |||
142 | struct tasklet_struct done_task; | ||
143 | struct tasklet_struct queue_task; | ||
144 | |||
145 | struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC]; | ||
146 | dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC]; | ||
147 | |||
148 | u8 *key_base; | ||
149 | dma_addr_t key_phys_base; | ||
150 | |||
151 | u8 *iv_base; | ||
152 | dma_addr_t iv_phys_base; | ||
153 | |||
154 | struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK]; | ||
155 | dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK]; | ||
156 | |||
157 | struct ablkcipher_request *req; | ||
158 | size_t total; | ||
159 | struct scatterlist *in_sg; | ||
160 | unsigned int nb_in_sg; | ||
161 | struct scatterlist *out_sg; | ||
162 | unsigned int nb_out_sg; | ||
163 | |||
164 | u32 error; | ||
165 | struct timer_list watchdog; | ||
166 | }; | ||
167 | |||
168 | static struct sahara_dev *dev_ptr; | ||
169 | |||
170 | static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg) | ||
171 | { | ||
172 | writel(data, dev->regs_base + reg); | ||
173 | } | ||
174 | |||
175 | static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg) | ||
176 | { | ||
177 | return readl(dev->regs_base + reg); | ||
178 | } | ||
179 | |||
180 | static u32 sahara_aes_key_hdr(struct sahara_dev *dev) | ||
181 | { | ||
182 | u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES | | ||
183 | SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO | | ||
184 | SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT; | ||
185 | |||
186 | if (dev->flags & FLAGS_CBC) { | ||
187 | hdr |= SAHARA_HDR_SKHA_MODE_CBC; | ||
188 | hdr ^= SAHARA_HDR_PARITY_BIT; | ||
189 | } | ||
190 | |||
191 | if (dev->flags & FLAGS_ENCRYPT) { | ||
192 | hdr |= SAHARA_HDR_SKHA_OP_ENC; | ||
193 | hdr ^= SAHARA_HDR_PARITY_BIT; | ||
194 | } | ||
195 | |||
196 | return hdr; | ||
197 | } | ||
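sahara_aes_key_hdr() keeps the descriptor header's overall bit parity constant: each time a single, previously clear mode bit is ORed in, the XOR that follows toggles SAHARA_HDR_PARITY_BIT to flip the parity back. A userspace sketch of the invariant, assuming the bit being set was clear; 0x91880000 is simply the base key header computed above:

    #include <stdint.h>
    #include <stdio.h>

    #define HDR_PARITY_BIT	(1u << 31)

    /* Set one previously clear header bit, then toggle the parity bit
     * so the popcount parity of the whole word is unchanged. */
    static uint32_t hdr_set(uint32_t hdr, uint32_t bit)
    {
    	hdr |= bit;
    	hdr ^= HDR_PARITY_BIT;
    	return hdr;
    }

    int main(void)
    {
    	uint32_t hdr = 0x91880000u;	/* base AES key header */

    	printf("parity before: %d\n", __builtin_popcount(hdr) & 1);
    	hdr = hdr_set(hdr, 1u << 3);	/* SKHA CBC mode bit */
    	printf("parity after:  %d\n", __builtin_popcount(hdr) & 1);
    	return 0;
    }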
198 | |||
199 | static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev) | ||
200 | { | ||
201 | return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA | | ||
202 | SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT; | ||
203 | } | ||
204 | |||
205 | static int sahara_sg_length(struct scatterlist *sg, | ||
206 | unsigned int total) | ||
207 | { | ||
208 | int sg_nb; | ||
209 | unsigned int len; | ||
210 | struct scatterlist *sg_list; | ||
211 | |||
212 | sg_nb = 0; | ||
213 | sg_list = sg; | ||
214 | |||
215 | while (total) { | ||
216 | len = min(sg_list->length, total); | ||
217 | |||
218 | sg_nb++; | ||
219 | total -= len; | ||
220 | |||
221 | sg_list = sg_next(sg_list); | ||
222 | if (!sg_list) | ||
223 | total = 0; | ||
224 | } | ||
225 | |||
226 | return sg_nb; | ||
227 | } | ||
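sahara_sg_length() counts how many scatterlist entries are needed to cover 'total' bytes, stopping early if the list runs out. The same logic over a plain array, as a userspace model:

    #include <stddef.h>

    /* How many segments does it take to cover 'total' bytes? */
    static int segs_for(const size_t *seg_len, int nsegs, size_t total)
    {
    	int n = 0;
    	int i;

    	for (i = 0; i < nsegs && total > 0; i++) {
    		size_t len = seg_len[i] < total ? seg_len[i] : total;

    		total -= len;
    		n++;
    	}
    	return n;
    }
    /* Three 4 KiB segments covering total = 6144 bytes -> segs_for()
     * returns 2 (4096 + 2048). */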
228 | |||
229 | static char *sahara_err_src[16] = { | ||
230 | "No error", | ||
231 | "Header error", | ||
232 | "Descriptor length error", | ||
233 | "Descriptor length or pointer error", | ||
234 | "Link length error", | ||
235 | "Link pointer error", | ||
236 | "Input buffer error", | ||
237 | "Output buffer error", | ||
238 | "Output buffer starvation", | ||
239 | "Internal state fault", | ||
240 | "General descriptor problem", | ||
241 | "Reserved", | ||
242 | "Descriptor address error", | ||
243 | "Link address error", | ||
244 | "CHA error", | ||
245 | "DMA error" | ||
246 | }; | ||
247 | |||
248 | static char *sahara_err_dmasize[4] = { | ||
249 | "Byte transfer", | ||
250 | "Half-word transfer", | ||
251 | "Word transfer", | ||
252 | "Reserved" | ||
253 | }; | ||
254 | |||
255 | static char *sahara_err_dmasrc[8] = { | ||
256 | "No error", | ||
257 | "AHB bus error", | ||
258 | "Internal IP bus error", | ||
259 | "Parity error", | ||
260 | "DMA crosses 256 byte boundary", | ||
261 | "DMA is busy", | ||
262 | "Reserved", | ||
263 | "DMA HW error" | ||
264 | }; | ||
265 | |||
266 | static char *sahara_cha_errsrc[12] = { | ||
267 | "Input buffer non-empty", | ||
268 | "Illegal address", | ||
269 | "Illegal mode", | ||
270 | "Illegal data size", | ||
271 | "Illegal key size", | ||
272 | "Write during processing", | ||
273 | "CTX read during processing", | ||
274 | "HW error", | ||
275 | "Input buffer disabled/underflow", | ||
276 | "Output buffer disabled/overflow", | ||
277 | "DES key parity error", | ||
278 | "Reserved" | ||
279 | }; | ||
280 | |||
281 | static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" }; | ||
282 | |||
283 | static void sahara_decode_error(struct sahara_dev *dev, unsigned int error) | ||
284 | { | ||
285 | u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error); | ||
286 | u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error)); | ||
287 | |||
288 | dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error); | ||
289 | |||
290 | dev_err(dev->device, " - %s.\n", sahara_err_src[source]); | ||
291 | |||
292 | if (source == SAHARA_ERRSOURCE_DMA) { | ||
293 | if (error & SAHARA_ERRSTATUS_DMA_DIR) | ||
294 | dev_err(dev->device, " * DMA read.\n"); | ||
295 | else | ||
296 | dev_err(dev->device, " * DMA write.\n"); | ||
297 | |||
298 | dev_err(dev->device, " * %s.\n", | ||
299 | sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]); | ||
300 | dev_err(dev->device, " * %s.\n", | ||
301 | sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]); | ||
302 | } else if (source == SAHARA_ERRSOURCE_CHA) { | ||
303 | dev_err(dev->device, " * %s.\n", | ||
304 | sahara_cha_errsrc[chasrc]); | ||
305 | dev_err(dev->device, " * %s.\n", | ||
306 | sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]); | ||
307 | } | ||
308 | dev_err(dev->device, "\n"); | ||
309 | } | ||
310 | |||
311 | static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" }; | ||
312 | |||
313 | static void sahara_decode_status(struct sahara_dev *dev, unsigned int status) | ||
314 | { | ||
315 | u8 state; | ||
316 | |||
317 | if (!IS_ENABLED(DEBUG)) | ||
318 | return; | ||
319 | |||
320 | state = SAHARA_STATUS_GET_STATE(status); | ||
321 | |||
322 | dev_dbg(dev->device, "%s: Status Register = 0x%08x\n", | ||
323 | __func__, status); | ||
324 | |||
325 | dev_dbg(dev->device, " - State = %d:\n", state); | ||
326 | if (state & SAHARA_STATE_COMP_FLAG) | ||
327 | dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n"); | ||
328 | |||
329 | dev_dbg(dev->device, " * %s.\n", | ||
330 | sahara_state[state & ~SAHARA_STATE_COMP_FLAG]); | ||
331 | |||
332 | if (status & SAHARA_STATUS_DAR_FULL) | ||
333 | dev_dbg(dev->device, " - DAR Full.\n"); | ||
334 | if (status & SAHARA_STATUS_ERROR) | ||
335 | dev_dbg(dev->device, " - Error.\n"); | ||
336 | if (status & SAHARA_STATUS_SECURE) | ||
337 | dev_dbg(dev->device, " - Secure.\n"); | ||
338 | if (status & SAHARA_STATUS_FAIL) | ||
339 | dev_dbg(dev->device, " - Fail.\n"); | ||
340 | if (status & SAHARA_STATUS_RNG_RESEED) | ||
341 | dev_dbg(dev->device, " - RNG Reseed Request.\n"); | ||
342 | if (status & SAHARA_STATUS_ACTIVE_RNG) | ||
343 | dev_dbg(dev->device, " - RNG Active.\n"); | ||
344 | if (status & SAHARA_STATUS_ACTIVE_MDHA) | ||
345 | dev_dbg(dev->device, " - MDHA Active.\n"); | ||
346 | if (status & SAHARA_STATUS_ACTIVE_SKHA) | ||
347 | dev_dbg(dev->device, " - SKHA Active.\n"); | ||
348 | |||
349 | if (status & SAHARA_STATUS_MODE_BATCH) | ||
350 | dev_dbg(dev->device, " - Batch Mode.\n"); | ||
351 | else if (status & SAHARA_STATUS_MODE_DEDICATED) | ||
352 | dev_dbg(dev->device, " - Dedicated Mode.\n"); | ||
353 | else if (status & SAHARA_STATUS_MODE_DEBUG) | ||
354 | dev_dbg(dev->device, " - Debug Mode.\n"); | ||
355 | |||
356 | dev_dbg(dev->device, " - Internal state = 0x%02x\n", | ||
357 | SAHARA_STATUS_GET_ISTATE(status)); | ||
358 | |||
359 | dev_dbg(dev->device, "Current DAR: 0x%08x\n", | ||
360 | sahara_read(dev, SAHARA_REG_CDAR)); | ||
361 | dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n", | ||
362 | sahara_read(dev, SAHARA_REG_IDAR)); | ||
363 | } | ||
364 | |||
365 | static void sahara_dump_descriptors(struct sahara_dev *dev) | ||
366 | { | ||
367 | int i; | ||
368 | |||
369 | if (!IS_ENABLED(DEBUG)) | ||
370 | return; | ||
371 | |||
372 | for (i = 0; i < SAHARA_MAX_HW_DESC; i++) { | ||
373 | dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n", | ||
374 | i, dev->hw_phys_desc[i]); | ||
375 | dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr); | ||
376 | dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1); | ||
377 | dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1); | ||
378 | dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2); | ||
379 | dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2); | ||
380 | dev_dbg(dev->device, "\tnext = 0x%08x\n", | ||
381 | dev->hw_desc[i]->next); | ||
382 | } | ||
383 | dev_dbg(dev->device, "\n"); | ||
384 | } | ||
385 | |||
386 | static void sahara_dump_links(struct sahara_dev *dev) | ||
387 | { | ||
388 | int i; | ||
389 | |||
390 | if (!IS_ENABLED(DEBUG)) | ||
391 | return; | ||
392 | |||
393 | for (i = 0; i < SAHARA_MAX_HW_LINK; i++) { | ||
394 | dev_dbg(dev->device, "Link (%d) (0x%08x):\n", | ||
395 | i, dev->hw_phys_link[i]); | ||
396 | dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len); | ||
397 | dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p); | ||
398 | dev_dbg(dev->device, "\tnext = 0x%08x\n", | ||
399 | dev->hw_link[i]->next); | ||
400 | } | ||
401 | dev_dbg(dev->device, "\n"); | ||
402 | } | ||
403 | |||
404 | static void sahara_aes_done_task(unsigned long data) | ||
405 | { | ||
406 | struct sahara_dev *dev = (struct sahara_dev *)data; | ||
407 | |||
408 | dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, | ||
409 | DMA_FROM_DEVICE); | ||
410 | dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, | ||
411 | DMA_TO_DEVICE); | ||
412 | |||
413 | spin_lock(&dev->lock); | ||
414 | clear_bit(FLAGS_BUSY, &dev->flags); | ||
415 | spin_unlock(&dev->lock); | ||
416 | |||
417 | dev->req->base.complete(&dev->req->base, dev->error); | ||
418 | } | ||
419 | |||
420 | static void sahara_watchdog(unsigned long data) | ||
421 | { | ||
422 | struct sahara_dev *dev = (struct sahara_dev *)data; | ||
423 | unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); | ||
424 | unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS); | ||
425 | |||
426 | sahara_decode_status(dev, stat); | ||
427 | sahara_decode_error(dev, err); | ||
428 | dev->error = -ETIMEDOUT; | ||
429 | sahara_aes_done_task(data); | ||
430 | } | ||
431 | |||
432 | static int sahara_hw_descriptor_create(struct sahara_dev *dev) | ||
433 | { | ||
434 | struct sahara_ctx *ctx = dev->ctx; | ||
435 | struct scatterlist *sg; | ||
436 | int ret; | ||
437 | int i, j; | ||
438 | |||
439 | /* Copy new key if necessary */ | ||
440 | if (ctx->flags & FLAGS_NEW_KEY) { | ||
441 | memcpy(dev->key_base, ctx->key, ctx->keylen); | ||
442 | ctx->flags &= ~FLAGS_NEW_KEY; | ||
443 | |||
444 | if (dev->flags & FLAGS_CBC) { | ||
445 | dev->hw_desc[0]->len1 = AES_BLOCK_SIZE; | ||
446 | dev->hw_desc[0]->p1 = dev->iv_phys_base; | ||
447 | } else { | ||
448 | dev->hw_desc[0]->len1 = 0; | ||
449 | dev->hw_desc[0]->p1 = 0; | ||
450 | } | ||
451 | dev->hw_desc[0]->len2 = ctx->keylen; | ||
452 | dev->hw_desc[0]->p2 = dev->key_phys_base; | ||
453 | dev->hw_desc[0]->next = dev->hw_phys_desc[1]; | ||
454 | } | ||
455 | dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev); | ||
456 | |||
457 | dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total); | ||
458 | dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total); | ||
459 | if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) { | ||
460 | dev_err(dev->device, "not enough hw links (%d)\n", | ||
461 | dev->nb_in_sg + dev->nb_out_sg); | ||
462 | return -EINVAL; | ||
463 | } | ||
464 | |||
465 | ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, | ||
466 | DMA_TO_DEVICE); | ||
467 | if (ret != dev->nb_in_sg) { | ||
468 | dev_err(dev->device, "couldn't map in sg\n"); | ||
469 | goto unmap_in; | ||
470 | } | ||
471 | ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg, | ||
472 | DMA_FROM_DEVICE); | ||
473 | if (ret != dev->nb_out_sg) { | ||
474 | dev_err(dev->device, "couldn't map out sg\n"); | ||
475 | goto unmap_out; | ||
476 | } | ||
477 | |||
478 | /* Create input links */ | ||
479 | dev->hw_desc[1]->p1 = dev->hw_phys_link[0]; | ||
480 | sg = dev->in_sg; | ||
481 | for (i = 0; i < dev->nb_in_sg; i++) { | ||
482 | dev->hw_link[i]->len = sg->length; | ||
483 | dev->hw_link[i]->p = sg->dma_address; | ||
484 | if (i == (dev->nb_in_sg - 1)) { | ||
485 | dev->hw_link[i]->next = 0; | ||
486 | } else { | ||
487 | dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; | ||
488 | sg = sg_next(sg); | ||
489 | } | ||
490 | } | ||
491 | |||
492 | /* Create output links */ | ||
493 | dev->hw_desc[1]->p2 = dev->hw_phys_link[i]; | ||
494 | sg = dev->out_sg; | ||
495 | for (j = i; j < dev->nb_out_sg + i; j++) { | ||
496 | dev->hw_link[j]->len = sg->length; | ||
497 | dev->hw_link[j]->p = sg->dma_address; | ||
498 | if (j == (dev->nb_out_sg + i - 1)) { | ||
499 | dev->hw_link[j]->next = 0; | ||
500 | } else { | ||
501 | dev->hw_link[j]->next = dev->hw_phys_link[j + 1]; | ||
502 | sg = sg_next(sg); | ||
503 | } | ||
504 | } | ||
505 | |||
506 | /* Fill remaining fields of hw_desc[1] */ | ||
507 | dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev); | ||
508 | dev->hw_desc[1]->len1 = dev->total; | ||
509 | dev->hw_desc[1]->len2 = dev->total; | ||
510 | dev->hw_desc[1]->next = 0; | ||
511 | |||
512 | sahara_dump_descriptors(dev); | ||
513 | sahara_dump_links(dev); | ||
514 | |||
515 | /* Start processing descriptor chain. */ | ||
516 | mod_timer(&dev->watchdog, | ||
517 | jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS)); | ||
518 | sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR); | ||
519 | |||
520 | return 0; | ||
521 | |||
522 | unmap_out: | ||
523 | /* out_sg mapping failed: undo only the in_sg mapping, | ||
524 | * with the same direction it was mapped with */ | ||
525 | dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, | ||
526 | DMA_TO_DEVICE); | ||
527 | unmap_in: | ||
528 | /* in_sg mapping failed: nothing is mapped yet */ | ||
529 | return -EINVAL; | ||
530 | } | ||
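The function above assembles a fixed two-descriptor chain before kicking the engine through the DAR register. Laid out in memory, the shape is roughly as follows (a sketch reconstructed from the code, using the struct names defined earlier in this file):

    /*
     *  DAR --> hw_desc[0]  { len1/p1 = IV (CBC only),
     *                        len2/p2 = key,
     *                        next    = hw_desc[1] }
     *          hw_desc[1]  { len1 = total, p1 = hw_link[0]   (input),
     *                        len2 = total, p2 = hw_link[i]   (output),
     *                        next = 0 }
     *          hw_link[0..i-1]   one entry per mapped src sg segment
     *          hw_link[i..i+j-1] one entry per mapped dst sg segment
     */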
531 | |||
532 | static void sahara_aes_queue_task(unsigned long data) | ||
533 | { | ||
534 | struct sahara_dev *dev = (struct sahara_dev *)data; | ||
535 | struct crypto_async_request *async_req, *backlog; | ||
536 | struct sahara_ctx *ctx; | ||
537 | struct sahara_aes_reqctx *rctx; | ||
538 | struct ablkcipher_request *req; | ||
539 | int ret; | ||
540 | |||
541 | spin_lock(&dev->lock); | ||
542 | backlog = crypto_get_backlog(&dev->queue); | ||
543 | async_req = crypto_dequeue_request(&dev->queue); | ||
544 | if (!async_req) | ||
545 | clear_bit(FLAGS_BUSY, &dev->flags); | ||
546 | spin_unlock(&dev->lock); | ||
547 | |||
548 | if (!async_req) | ||
549 | return; | ||
550 | |||
551 | if (backlog) | ||
552 | backlog->complete(backlog, -EINPROGRESS); | ||
553 | |||
554 | req = ablkcipher_request_cast(async_req); | ||
555 | |||
556 | /* Request is ready to be dispatched by the device */ | ||
557 | dev_dbg(dev->device, | ||
558 | "dispatch request (nbytes=%d, src=%p, dst=%p)\n", | ||
559 | req->nbytes, req->src, req->dst); | ||
560 | |||
561 | /* assign new request to device */ | ||
562 | dev->req = req; | ||
563 | dev->total = req->nbytes; | ||
564 | dev->in_sg = req->src; | ||
565 | dev->out_sg = req->dst; | ||
566 | |||
567 | rctx = ablkcipher_request_ctx(req); | ||
568 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
569 | rctx->mode &= FLAGS_MODE_MASK; | ||
570 | dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode; | ||
571 | |||
572 | if ((dev->flags & FLAGS_CBC) && req->info) | ||
573 | memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE); | ||
574 | |||
575 | /* assign new context to device */ | ||
576 | ctx->dev = dev; | ||
577 | dev->ctx = ctx; | ||
578 | |||
579 | ret = sahara_hw_descriptor_create(dev); | ||
580 | if (ret < 0) { | ||
581 | spin_lock(&dev->lock); | ||
582 | clear_bit(FLAGS_BUSY, &dev->flags); | ||
583 | spin_unlock(&dev->lock); | ||
584 | dev->req->base.complete(&dev->req->base, ret); | ||
585 | } | ||
586 | } | ||
587 | |||
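| /* | ||
|  * Only 128-bit keys are handled by the engine itself; 192/256-bit keys | ||
|  * are recorded in the context and routed to the software fallback tfm | ||
|  * allocated in sahara_aes_cra_init(). | ||
|  */ | ||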
588 | static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
589 | unsigned int keylen) | ||
590 | { | ||
591 | struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
592 | int ret; | ||
593 | |||
594 | ctx->keylen = keylen; | ||
595 | |||
596 | /* SAHARA only supports 128-bit keys */ | ||
597 | if (keylen == AES_KEYSIZE_128) { | ||
598 | memcpy(ctx->key, key, keylen); | ||
599 | ctx->flags |= FLAGS_NEW_KEY; | ||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | if (keylen != AES_KEYSIZE_128 && | ||
604 | keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) | ||
605 | return -EINVAL; | ||
606 | |||
607 | /* | ||
608 | * The requested key size is not supported by HW, do a fallback. | ||
609 | */ | ||
610 | ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | ||
611 | ctx->fallback->base.crt_flags |= | ||
612 | (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); | ||
613 | |||
614 | ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); | ||
615 | if (ret) { | ||
616 | struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm); | ||
617 | |||
618 | tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK; | ||
619 | tfm_aux->crt_flags |= | ||
620 | (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK); | ||
621 | } | ||
622 | return ret; | ||
623 | } | ||
624 | |||
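| /* | ||
|  * Common entry point for the four encrypt/decrypt wrappers below: the | ||
|  * request is queued under the lock and the queue tasklet is kicked only | ||
|  * if the device was idle, so a single request is in flight at a time. | ||
|  */ | ||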
625 | static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
626 | { | ||
627 | struct sahara_ctx *ctx = crypto_ablkcipher_ctx( | ||
628 | crypto_ablkcipher_reqtfm(req)); | ||
629 | struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
630 | struct sahara_dev *dev = dev_ptr; | ||
631 | int err = 0; | ||
632 | int busy; | ||
633 | |||
634 | dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n", | ||
635 | req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); | ||
636 | |||
637 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { | ||
638 | dev_err(dev->device, | ||
639 | "request size is not a multiple of the AES block size\n"); | ||
640 | return -EINVAL; | ||
641 | } | ||
642 | |||
643 | ctx->dev = dev; | ||
644 | |||
645 | rctx->mode = mode; | ||
646 | spin_lock_bh(&dev->lock); | ||
647 | err = ablkcipher_enqueue_request(&dev->queue, req); | ||
648 | busy = test_and_set_bit(FLAGS_BUSY, &dev->flags); | ||
649 | spin_unlock_bh(&dev->lock); | ||
650 | |||
651 | if (!busy) | ||
652 | tasklet_schedule(&dev->queue_task); | ||
653 | |||
654 | return err; | ||
655 | } | ||
656 | |||
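| /* | ||
|  * The mode-specific wrappers temporarily swap the fallback tfm into the | ||
|  * request when the key is not 128 bits wide, then restore the original | ||
|  * tfm so the substitution stays invisible to the caller. | ||
|  */ | ||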
657 | static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
658 | { | ||
659 | struct crypto_tfm *tfm = | ||
660 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
661 | struct sahara_ctx *ctx = crypto_ablkcipher_ctx( | ||
662 | crypto_ablkcipher_reqtfm(req)); | ||
663 | int err; | ||
664 | |||
665 | if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { | ||
666 | ablkcipher_request_set_tfm(req, ctx->fallback); | ||
667 | err = crypto_ablkcipher_encrypt(req); | ||
668 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); | ||
669 | return err; | ||
670 | } | ||
671 | |||
672 | return sahara_aes_crypt(req, FLAGS_ENCRYPT); | ||
673 | } | ||
674 | |||
675 | static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
676 | { | ||
677 | struct crypto_tfm *tfm = | ||
678 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
679 | struct sahara_ctx *ctx = crypto_ablkcipher_ctx( | ||
680 | crypto_ablkcipher_reqtfm(req)); | ||
681 | int err; | ||
682 | |||
683 | if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { | ||
684 | ablkcipher_request_set_tfm(req, ctx->fallback); | ||
685 | err = crypto_ablkcipher_decrypt(req); | ||
686 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); | ||
687 | return err; | ||
688 | } | ||
689 | |||
690 | return sahara_aes_crypt(req, 0); | ||
691 | } | ||
692 | |||
693 | static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
694 | { | ||
695 | struct crypto_tfm *tfm = | ||
696 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
697 | struct sahara_ctx *ctx = crypto_ablkcipher_ctx( | ||
698 | crypto_ablkcipher_reqtfm(req)); | ||
699 | int err; | ||
700 | |||
701 | if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { | ||
702 | ablkcipher_request_set_tfm(req, ctx->fallback); | ||
703 | err = crypto_ablkcipher_encrypt(req); | ||
704 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); | ||
705 | return err; | ||
706 | } | ||
707 | |||
708 | return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); | ||
709 | } | ||
710 | |||
711 | static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
712 | { | ||
713 | struct crypto_tfm *tfm = | ||
714 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
715 | struct sahara_ctx *ctx = crypto_ablkcipher_ctx( | ||
716 | crypto_ablkcipher_reqtfm(req)); | ||
717 | int err; | ||
718 | |||
719 | if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { | ||
720 | ablkcipher_request_set_tfm(req, ctx->fallback); | ||
721 | err = crypto_ablkcipher_decrypt(req); | ||
722 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); | ||
723 | return err; | ||
724 | } | ||
725 | |||
726 | return sahara_aes_crypt(req, FLAGS_CBC); | ||
727 | } | ||
728 | |||
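| /* | ||
|  * Allocate the software fallback at tfm creation time; the | ||
|  * CRYPTO_ALG_NEED_FALLBACK bit in the mask prevents the allocator from | ||
|  * handing back this driver itself as its own fallback. | ||
|  */ | ||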
729 | static int sahara_aes_cra_init(struct crypto_tfm *tfm) | ||
730 | { | ||
731 | const char *name = tfm->__crt_alg->cra_name; | ||
732 | struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); | ||
733 | |||
734 | ctx->fallback = crypto_alloc_ablkcipher(name, 0, | ||
735 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | ||
736 | if (IS_ERR(ctx->fallback)) { | ||
737 | pr_err("Error allocating fallback algo %s\n", name); | ||
738 | return PTR_ERR(ctx->fallback); | ||
739 | } | ||
740 | |||
741 | tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx); | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static void sahara_aes_cra_exit(struct crypto_tfm *tfm) | ||
747 | { | ||
748 | struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); | ||
749 | |||
750 | if (ctx->fallback) | ||
751 | crypto_free_ablkcipher(ctx->fallback); | ||
752 | ctx->fallback = NULL; | ||
753 | } | ||
754 | |||
755 | static struct crypto_alg aes_algs[] = { | ||
756 | { | ||
757 | .cra_name = "ecb(aes)", | ||
758 | .cra_driver_name = "sahara-ecb-aes", | ||
759 | .cra_priority = 300, | ||
760 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
761 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, | ||
762 | .cra_blocksize = AES_BLOCK_SIZE, | ||
763 | .cra_ctxsize = sizeof(struct sahara_ctx), | ||
764 | .cra_alignmask = 0x0, | ||
765 | .cra_type = &crypto_ablkcipher_type, | ||
766 | .cra_module = THIS_MODULE, | ||
767 | .cra_init = sahara_aes_cra_init, | ||
768 | .cra_exit = sahara_aes_cra_exit, | ||
769 | .cra_u.ablkcipher = { | ||
770 | .min_keysize = AES_MIN_KEY_SIZE, | ||
771 | .max_keysize = AES_MAX_KEY_SIZE, | ||
772 | .setkey = sahara_aes_setkey, | ||
773 | .encrypt = sahara_aes_ecb_encrypt, | ||
774 | .decrypt = sahara_aes_ecb_decrypt, | ||
775 | } | ||
776 | }, { | ||
777 | .cra_name = "cbc(aes)", | ||
778 | .cra_driver_name = "sahara-cbc-aes", | ||
779 | .cra_priority = 300, | ||
780 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
781 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, | ||
782 | .cra_blocksize = AES_BLOCK_SIZE, | ||
783 | .cra_ctxsize = sizeof(struct sahara_ctx), | ||
784 | .cra_alignmask = 0x0, | ||
785 | .cra_type = &crypto_ablkcipher_type, | ||
786 | .cra_module = THIS_MODULE, | ||
787 | .cra_init = sahara_aes_cra_init, | ||
788 | .cra_exit = sahara_aes_cra_exit, | ||
789 | .cra_u.ablkcipher = { | ||
790 | .min_keysize = AES_MIN_KEY_SIZE, | ||
791 | .max_keysize = AES_MAX_KEY_SIZE, | ||
792 | .ivsize = AES_BLOCK_SIZE, | ||
793 | .setkey = sahara_aes_setkey, | ||
794 | .encrypt = sahara_aes_cbc_encrypt, | ||
795 | .decrypt = sahara_aes_cbc_decrypt, | ||
796 | } | ||
797 | } | ||
798 | }; | ||
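| | ||
| /* | ||
|  * Usage note (illustrative only, not part of the driver): once these | ||
|  * algs are registered, users reach the hardware through the generic | ||
|  * ablkcipher API. A minimal sketch, assuming the caller provides the | ||
|  * hypothetical my_complete_cb/my_ctx, src_sg/dst_sg, key and iv: | ||
|  * | ||
|  *	struct crypto_ablkcipher *tfm = | ||
|  *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0); | ||
|  *	struct ablkcipher_request *req; | ||
|  * | ||
|  *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128); | ||
|  *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL); | ||
|  *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
|  *					my_complete_cb, my_ctx); | ||
|  *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv); | ||
|  *	crypto_ablkcipher_encrypt(req); | ||
|  * | ||
|  * encrypt() returns -EINPROGRESS (or -EBUSY when backlogged) and the | ||
|  * callback fires when the driver's done tasklet completes the request. | ||
|  */ | ||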
799 | |||
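| /* | ||
|  * Latch status/error, ack the interrupt, disarm the watchdog armed in | ||
|  * sahara_hw_descriptor_create() and translate the engine state into | ||
|  * dev->error; actual completion is deferred to the done tasklet. | ||
|  */ | ||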
800 | static irqreturn_t sahara_irq_handler(int irq, void *data) | ||
801 | { | ||
802 | struct sahara_dev *dev = (struct sahara_dev *)data; | ||
803 | unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS); | ||
804 | unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); | ||
805 | |||
806 | del_timer(&dev->watchdog); | ||
807 | |||
808 | sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR, | ||
809 | SAHARA_REG_CMD); | ||
810 | |||
811 | sahara_decode_status(dev, stat); | ||
812 | |||
813 | if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) { | ||
814 | return IRQ_NONE; | ||
815 | } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) { | ||
816 | dev->error = 0; | ||
817 | } else { | ||
818 | sahara_decode_error(dev, err); | ||
819 | dev->error = -EINVAL; | ||
820 | } | ||
821 | |||
822 | tasklet_schedule(&dev->done_task); | ||
823 | |||
824 | return IRQ_HANDLED; | ||
825 | } | ||
826 |||
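| /* Register the AES algs, unwinding the ones already registered on failure. */ | ||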
828 | static int sahara_register_algs(struct sahara_dev *dev) | ||
829 | { | ||
830 | int err, i, j; | ||
831 | |||
832 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { | ||
833 | INIT_LIST_HEAD(&aes_algs[i].cra_list); | ||
834 | err = crypto_register_alg(&aes_algs[i]); | ||
835 | if (err) | ||
836 | goto err_aes_algs; | ||
837 | } | ||
838 | |||
839 | return 0; | ||
840 | |||
841 | err_aes_algs: | ||
842 | for (j = 0; j < i; j++) | ||
843 | crypto_unregister_alg(&aes_algs[j]); | ||
844 | |||
845 | return err; | ||
846 | } | ||
847 | |||
848 | static void sahara_unregister_algs(struct sahara_dev *dev) | ||
849 | { | ||
850 | int i; | ||
851 | |||
852 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) | ||
853 | crypto_unregister_alg(&aes_algs[i]); | ||
854 | } | ||
855 | |||
856 | static struct platform_device_id sahara_platform_ids[] = { | ||
857 | { .name = "sahara-imx27" }, | ||
858 | { /* sentinel */ } | ||
859 | }; | ||
860 | MODULE_DEVICE_TABLE(platform, sahara_platform_ids); | ||
861 | |||
862 | static const struct of_device_id sahara_dt_ids[] = { | ||
863 | { .compatible = "fsl,imx27-sahara" }, | ||
864 | { /* sentinel */ } | ||
865 | }; | ||
866 | MODULE_DEVICE_TABLE(of, sahara_dt_ids); | ||
867 | |||
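| /* | ||
|  * Probe order: MMIO region, IRQ and clocks first, then the coherent DMA | ||
|  * buffers (descriptors, key+IV, link table), then software state (queue, | ||
|  * tasklets, watchdog) before enabling clocks, checking the hardware | ||
|  * version and finally registering the algorithms. | ||
|  */ | ||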
868 | static int sahara_probe(struct platform_device *pdev) | ||
869 | { | ||
870 | struct sahara_dev *dev; | ||
871 | struct resource *res; | ||
872 | u32 version; | ||
873 | int irq; | ||
874 | int err; | ||
875 | int i; | ||
876 | |||
877 | dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL); | ||
878 | if (dev == NULL) { | ||
879 | dev_err(&pdev->dev, "unable to alloc data struct.\n"); | ||
880 | return -ENOMEM; | ||
881 | } | ||
882 | |||
883 | dev->device = &pdev->dev; | ||
884 | platform_set_drvdata(pdev, dev); | ||
885 | |||
886 | /* Get the base address */ | ||
887 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
888 | if (!res) { | ||
889 | dev_err(&pdev->dev, "failed to get memory region resource\n"); | ||
890 | return -ENODEV; | ||
891 | } | ||
892 | |||
893 | if (devm_request_mem_region(&pdev->dev, res->start, | ||
894 | resource_size(res), SAHARA_NAME) == NULL) { | ||
895 | dev_err(&pdev->dev, "failed to request memory region\n"); | ||
896 | return -ENOENT; | ||
897 | } | ||
898 | dev->regs_base = devm_ioremap(&pdev->dev, res->start, | ||
899 | resource_size(res)); | ||
900 | if (!dev->regs_base) { | ||
901 | dev_err(&pdev->dev, "failed to ioremap address region\n"); | ||
902 | return -ENOENT; | ||
903 | } | ||
904 | |||
905 | /* Get the IRQ */ | ||
906 | irq = platform_get_irq(pdev, 0); | ||
907 | if (irq < 0) { | ||
908 | dev_err(&pdev->dev, "failed to get irq resource\n"); | ||
909 | return irq; | ||
910 | } | ||
911 | |||
912 | err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler, | ||
913 | 0, SAHARA_NAME, dev); | ||
914 | if (err) { | ||
915 | dev_err(&pdev->dev, "failed to request irq\n"); | ||
916 | return err; | ||
917 | } | ||
917 | |||
918 | /* clocks */ | ||
919 | dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | ||
920 | if (IS_ERR(dev->clk_ipg)) { | ||
921 | dev_err(&pdev->dev, "Could not get ipg clock\n"); | ||
922 | return PTR_ERR(dev->clk_ipg); | ||
923 | } | ||
924 | |||
925 | dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); | ||
926 | if (IS_ERR(dev->clk_ahb)) { | ||
927 | dev_err(&pdev->dev, "Could not get ahb clock\n"); | ||
928 | return PTR_ERR(dev->clk_ahb); | ||
929 | } | ||
930 | |||
931 | /* Allocate HW descriptors */ | ||
932 | dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev, | ||
933 | SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), | ||
934 | &dev->hw_phys_desc[0], GFP_KERNEL); | ||
935 | if (!dev->hw_desc[0]) { | ||
936 | dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); | ||
937 | return -ENOMEM; | ||
938 | } | ||
939 | dev->hw_desc[1] = dev->hw_desc[0] + 1; | ||
940 | dev->hw_phys_desc[1] = dev->hw_phys_desc[0] + | ||
941 | sizeof(struct sahara_hw_desc); | ||
942 | |||
943 | /* Allocate space for iv and key */ | ||
944 | dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, | ||
945 | &dev->key_phys_base, GFP_KERNEL); | ||
946 | if (!dev->key_base) { | ||
947 | dev_err(&pdev->dev, "Could not allocate memory for key\n"); | ||
948 | err = -ENOMEM; | ||
949 | goto err_key; | ||
950 | } | ||
951 | dev->iv_base = dev->key_base + AES_KEYSIZE_128; | ||
952 | dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128; | ||
953 | |||
954 | /* Allocate space for HW links */ | ||
955 | dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, | ||
956 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), | ||
957 | &dev->hw_phys_link[0], GFP_KERNEL); | ||
958 | if (!dev->hw_link[0]) { | ||
959 | dev_err(&pdev->dev, "Could not allocate hw links\n"); | ||
960 | err = -ENOMEM; | ||
961 | goto err_link; | ||
962 | } | ||
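| /* | ||
|  * The link table is a single coherent allocation; carve it into | ||
|  * SAHARA_MAX_HW_LINK entries, keeping each entry's CPU and DMA | ||
|  * addresses in step. | ||
|  */ | ||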
963 | for (i = 1; i < SAHARA_MAX_HW_LINK; i++) { | ||
964 | dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] + | ||
965 | sizeof(struct sahara_hw_link); | ||
966 | dev->hw_link[i] = dev->hw_link[i - 1] + 1; | ||
967 | } | ||
968 | |||
969 | crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); | ||
970 | |||
971 | dev_ptr = dev; | ||
972 | |||
973 | tasklet_init(&dev->queue_task, sahara_aes_queue_task, | ||
974 | (unsigned long)dev); | ||
975 | tasklet_init(&dev->done_task, sahara_aes_done_task, | ||
976 | (unsigned long)dev); | ||
977 | |||
978 | init_timer(&dev->watchdog); | ||
979 | dev->watchdog.function = &sahara_watchdog; | ||
980 | dev->watchdog.data = (unsigned long)dev; | ||
981 | |||
982 | clk_prepare_enable(dev->clk_ipg); | ||
983 | clk_prepare_enable(dev->clk_ahb); | ||
984 | |||
985 | version = sahara_read(dev, SAHARA_REG_VERSION); | ||
986 | if (version != SAHARA_VERSION_3) { | ||
987 | dev_err(&pdev->dev, "SAHARA version %d not supported\n", | ||
988 | version); | ||
989 | err = -ENODEV; | ||
990 | goto err_algs; | ||
991 | } | ||
992 | |||
993 | sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH, | ||
994 | SAHARA_REG_CMD); | ||
995 | sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) | | ||
996 | SAHARA_CONTROL_SET_MAXBURST(8) | | ||
997 | SAHARA_CONTROL_RNG_AUTORSD | | ||
998 | SAHARA_CONTROL_ENABLE_INT, | ||
999 | SAHARA_REG_CONTROL); | ||
1000 | |||
1001 | err = sahara_register_algs(dev); | ||
1002 | if (err) | ||
1003 | goto err_algs; | ||
1004 | |||
1005 | dev_info(&pdev->dev, "SAHARA version %d initialized\n", version); | ||
1006 | |||
1007 | return 0; | ||
1008 | |||
1009 | err_algs: | ||
1010 | dma_free_coherent(&pdev->dev, | ||
1011 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), | ||
1012 | dev->hw_link[0], dev->hw_phys_link[0]); | ||
1013 | clk_disable_unprepare(dev->clk_ipg); | ||
1014 | clk_disable_unprepare(dev->clk_ahb); | ||
1015 | dev_ptr = NULL; | ||
1016 | err_link: | ||
1017 | dma_free_coherent(&pdev->dev, | ||
1018 | 2 * AES_KEYSIZE_128, | ||
1019 | dev->key_base, dev->key_phys_base); | ||
1020 | err_key: | ||
1021 | dma_free_coherent(&pdev->dev, | ||
1022 | SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), | ||
1023 | dev->hw_desc[0], dev->hw_phys_desc[0]); | ||
1024 | |||
1025 | return err; | ||
1026 | } | ||
1027 | |||
1028 | static int sahara_remove(struct platform_device *pdev) | ||
1029 | { | ||
1030 | struct sahara_dev *dev = platform_get_drvdata(pdev); | ||
1031 | |||
1032 | /* Stop accepting new requests before freeing the resources they use. */ | ||
1033 | sahara_unregister_algs(dev); | ||
1034 | |||
1035 | tasklet_kill(&dev->done_task); | ||
1036 | tasklet_kill(&dev->queue_task); | ||
1037 | |||
1038 | dma_free_coherent(&pdev->dev, | ||
1039 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), | ||
1040 | dev->hw_link[0], dev->hw_phys_link[0]); | ||
1041 | dma_free_coherent(&pdev->dev, | ||
1042 | 2 * AES_KEYSIZE_128, | ||
1043 | dev->key_base, dev->key_phys_base); | ||
1044 | dma_free_coherent(&pdev->dev, | ||
1045 | SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), | ||
1046 | dev->hw_desc[0], dev->hw_phys_desc[0]); | ||
1047 | |||
1048 | clk_disable_unprepare(dev->clk_ipg); | ||
1049 | clk_disable_unprepare(dev->clk_ahb); | ||
1050 | |||
1051 | dev_ptr = NULL; | ||
1051 | |||
1052 | return 0; | ||
1053 | } | ||
1054 | |||
1055 | static struct platform_driver sahara_driver = { | ||
1056 | .probe = sahara_probe, | ||
1057 | .remove = sahara_remove, | ||
1058 | .driver = { | ||
1059 | .name = SAHARA_NAME, | ||
1060 | .owner = THIS_MODULE, | ||
1061 | .of_match_table = of_match_ptr(sahara_dt_ids), | ||
1062 | }, | ||
1063 | .id_table = sahara_platform_ids, | ||
1064 | }; | ||
1065 | |||
1066 | module_platform_driver(sahara_driver); | ||
1067 | |||
1068 | MODULE_LICENSE("GPL"); | ||
1069 | MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>"); | ||
1070 | MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator"); | ||
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 1827e9f1f873..cf5508967539 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -938,6 +938,7 @@ static int hash_dma_final(struct ahash_request *req)
 	if (!ctx->device->dma.nents) {
 		dev_err(device_data->dev, "[%s] "
 				"ctx->device->dma.nents = 0", __func__);
+		ret = ctx->device->dma.nents;
 		goto out;
 	}
 
@@ -945,6 +946,7 @@ static int hash_dma_final(struct ahash_request *req)
 	if (bytes_written != req->nbytes) {
 		dev_err(device_data->dev, "[%s] "
 				"hash_dma_write() failed!", __func__);
+		ret = bytes_written;
 		goto out;
 	}
 
@@ -1367,14 +1369,12 @@ static int hash_setkey(struct crypto_ahash *tfm,
 	/**
 	 * Freed in final.
 	 */
-	ctx->key = kmalloc(keylen, GFP_KERNEL);
+	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
 	if (!ctx->key) {
 		pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
 				"for %d\n", __func__, alg);
 		return -ENOMEM;
 	}
-
-	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;
 
 	return ret;