author     Nicolas Royer <nicolas@eukrea.com>        2013-02-20 11:10:26 -0500
committer  Herbert Xu <herbert@gondor.apana.org.au>  2013-03-10 04:46:42 -0400
commit     d4905b38d1f6b60761a6fd16f45ebd1fac8b6e1f
tree       4f81ac788b02368092133f742e52de9c3dd52c38  /drivers/crypto
parent     1f858040c2f78013fd2b10ddeb9dc157c3362b04
crypto: atmel-sha - add support for latest release of the IP (0x410)
Updates from IP release 0x320 to 0x400:
- add DMA support (previous IP revisions used PDC)
- add DMA double input buffer support
- add SHA224 support
Update from IP release 0x400 to 0x410:
- add SHA384 and SHA512 support
Signed-off-by: Nicolas Royer <nicolas@eukrea.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Eric Bénard <eric@eukrea.com>
Tested-by: Eric Bénard <eric@eukrea.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
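
For context, the transforms this patch registers are reached through the kernel's generic ahash API; "sha384"/"sha512" resolve to the Atmel hardware only when the probed IP version advertises them (see atmel_sha_get_cap() in the diff below). The following is a minimal, hedged sketch of a kernel-side caller, not part of the patch; the function name is illustrative, and the asynchronous completion handling that a real caller of this ASYNC driver needs is elided.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: hash a linear kernel buffer with whatever
 * implementation the crypto API selects for "sha384". */
static int sha384_digest_sketch(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha384", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* May return -EINPROGRESS from an async driver such as this one;
	 * a real caller waits on a completion registered through
	 * ahash_request_set_callback(). */
	err = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}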
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig           |   8
-rw-r--r--  drivers/crypto/atmel-sha-regs.h  |   7
-rw-r--r--  drivers/crypto/atmel-sha.c       | 586
3 files changed, 497 insertions(+), 104 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 87ec4d027c25..e66fb0a332d4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -361,15 +361,17 @@ config CRYPTO_DEV_ATMEL_TDES
 	  will be called atmel-tdes.
 
 config CRYPTO_DEV_ATMEL_SHA
-	tristate "Support for Atmel SHA1/SHA256 hw accelerator"
+	tristate "Support for Atmel SHA hw accelerator"
 	depends on ARCH_AT91
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
+	select CRYPTO_SHA512
 	select CRYPTO_ALGAPI
 	help
-	  Some Atmel processors have SHA1/SHA256 hw accelerator.
+	  Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512
+	  hw accelerator.
 	  Select this if you want to use the Atmel module for
-	  SHA1/SHA256 algorithms.
+	  SHA1/SHA224/SHA256/SHA384/SHA512 algorithms.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called atmel-sha.
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index dc53a20d7da1..83b2d7425666 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -14,10 +14,13 @@
 #define SHA_MR_MODE_MANUAL	0x0
 #define SHA_MR_MODE_AUTO	0x1
 #define SHA_MR_MODE_PDC		0x2
-#define SHA_MR_DUALBUFF		(1 << 3)
 #define SHA_MR_PROCDLY		(1 << 4)
 #define SHA_MR_ALGO_SHA1	(0 << 8)
 #define SHA_MR_ALGO_SHA256	(1 << 8)
+#define SHA_MR_ALGO_SHA384	(2 << 8)
+#define SHA_MR_ALGO_SHA512	(3 << 8)
+#define SHA_MR_ALGO_SHA224	(4 << 8)
+#define SHA_MR_DUALBUFF		(1 << 16)
 
 #define SHA_IER			0x10
 #define SHA_IDR			0x14
@@ -33,6 +36,8 @@
 #define SHA_ISR_URAT_MR		(0x2 << 12)
 #define SHA_ISR_URAT_WO		(0x5 << 12)
 
+#define SHA_HW_VERSION		0xFC
+
 #define SHA_TPR			0x108
 #define SHA_TCR			0x10C
 #define SHA_TNPR		0x118
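
Two details of the new register layout are worth calling out: SHA_MR_DUALBUFF moves from bit 3 to bit 16, and the algorithm field in bits [10:8] gains SHA384 (2), SHA512 (3) and SHA224 (4) encodings. Below is a hedged sketch of how these fields compose into one mode-register value; the defines are copied from the header above, while the helper name and the worked value are this note's own, not the driver's.

/* Illustrative only: PDC mode, SHA-512, dual input buffer. */
#define SHA_MR_MODE_PDC		0x2
#define SHA_MR_ALGO_SHA512	(3 << 8)
#define SHA_MR_DUALBUFF		(1 << 16)

static inline unsigned int sha_mr_sketch(void)
{
	/* 0x2 | 0x300 | 0x10000 = 0x00010302 */
	return SHA_MR_MODE_PDC | SHA_MR_ALGO_SHA512 | SHA_MR_DUALBUFF;
}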
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4918e9424d31..eaed8bf183bc 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -38,6 +38,7 @@
 #include <crypto/sha.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
 #include "atmel-sha-regs.h"
 
 /* SHA flags */
@@ -52,11 +53,12 @@
 #define SHA_FLAGS_FINUP		BIT(16)
 #define SHA_FLAGS_SG		BIT(17)
 #define SHA_FLAGS_SHA1		BIT(18)
-#define SHA_FLAGS_SHA256	BIT(19)
-#define SHA_FLAGS_ERROR		BIT(20)
-#define SHA_FLAGS_PAD		BIT(21)
-
-#define SHA_FLAGS_DUALBUFF	BIT(24)
+#define SHA_FLAGS_SHA224	BIT(19)
+#define SHA_FLAGS_SHA256	BIT(20)
+#define SHA_FLAGS_SHA384	BIT(21)
+#define SHA_FLAGS_SHA512	BIT(22)
+#define SHA_FLAGS_ERROR		BIT(23)
+#define SHA_FLAGS_PAD		BIT(24)
 
 #define SHA_OP_UPDATE	1
 #define SHA_OP_FINAL	2
@@ -65,6 +67,12 @@
 
 #define ATMEL_SHA_DMA_THRESHOLD		56
 
+struct atmel_sha_caps {
+	bool	has_dma;
+	bool	has_dualbuff;
+	bool	has_sha224;
+	bool	has_sha_384_512;
+};
 
 struct atmel_sha_dev;
 
@@ -73,8 +81,8 @@ struct atmel_sha_reqctx {
 	unsigned long	flags;
 	unsigned long	op;
 
-	u8	digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
-	size_t	digcnt;
+	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
+	u64	digcnt[2];
 	size_t	bufcnt;
 	size_t	buflen;
 	dma_addr_t	dma_addr;
@@ -84,6 +92,8 @@ struct atmel_sha_reqctx {
 	unsigned int	offset;	/* offset in current sg */
 	unsigned int	total;	/* total request */
 
+	size_t	block_size;
+
 	u8	buffer[0] __aligned(sizeof(u32));
 };
 
@@ -97,7 +107,12 @@ struct atmel_sha_ctx {
 
 };
 
-#define ATMEL_SHA_QUEUE_LENGTH	1
+#define ATMEL_SHA_QUEUE_LENGTH	50
+
+struct atmel_sha_dma {
+	struct dma_chan			*chan;
+	struct dma_slave_config		dma_conf;
+};
 
 struct atmel_sha_dev {
 	struct list_head	list;
@@ -114,6 +129,12 @@ struct atmel_sha_dev {
 	unsigned long		flags;
 	struct crypto_queue	queue;
 	struct ahash_request	*req;
+
+	struct atmel_sha_dma	dma_lch_in;
+
+	struct atmel_sha_caps	caps;
+
+	u32	hw_version;
 };
 
 struct atmel_sha_drv {
@@ -137,14 +158,6 @@ static inline void atmel_sha_write(struct atmel_sha_dev *dd,
 	writel_relaxed(value, dd->io_base + offset);
 }
 
-static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
-{
-	atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);
-
-	if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
-		dd->flags |= SHA_FLAGS_DUALBUFF;
-}
-
 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
 {
 	size_t count;
@@ -176,31 +189,58 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
 }
 
 /*
- * The purpose of this padding is to ensure that the padded message
- * is a multiple of 512 bits. The bit "1" is appended at the end of
- * the message followed by "padlen-1" zero bits. Then a 64 bits block
- * equals to the message length in bits is appended.
+ * The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message followed by
+ * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
+ * 128 bits block (SHA384/SHA512) equals to the message length in bits
+ * is appended.
  *
- * padlen is calculated as followed:
+ * For SHA1/SHA224/SHA256, padlen is calculated as followed:
  *  - if message length < 56 bytes then padlen = 56 - message length
  *  - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as followed:
+ *  - if message length < 112 bytes then padlen = 112 - message length
+ *  - else padlen = 128 + 112 - message length
  */
 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
 {
 	unsigned int index, padlen;
-	u64 bits;
-	u64 size;
-
-	bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
-	size = cpu_to_be64(bits);
-
-	index = ctx->bufcnt & 0x3f;
-	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
-	*(ctx->buffer + ctx->bufcnt) = 0x80;
-	memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
-	memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
-	ctx->bufcnt += padlen + 8;
-	ctx->flags |= SHA_FLAGS_PAD;
+	u64 bits[2];
+	u64 size[2];
+
+	size[0] = ctx->digcnt[0];
+	size[1] = ctx->digcnt[1];
+
+	size[0] += ctx->bufcnt;
+	if (size[0] < ctx->bufcnt)
+		size[1]++;
+
+	size[0] += length;
+	if (size[0] < length)
+		size[1]++;
+
+	bits[1] = cpu_to_be64(size[0] << 3);
+	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
+
+	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
+		index = ctx->bufcnt & 0x7f;
+		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
+		*(ctx->buffer + ctx->bufcnt) = 0x80;
+		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
+		ctx->bufcnt += padlen + 16;
+		ctx->flags |= SHA_FLAGS_PAD;
+	} else {
+		index = ctx->bufcnt & 0x3f;
+		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+		*(ctx->buffer + ctx->bufcnt) = 0x80;
+		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
+		ctx->bufcnt += padlen + 8;
+		ctx->flags |= SHA_FLAGS_PAD;
+	}
 }
 
 static int atmel_sha_init(struct ahash_request *req)
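
The padding rules in the comment above can be checked with a small standalone sketch (userspace C, names illustrative, not part of the patch): a 100-byte message padded for SHA-256 and for SHA-512 both end exactly on a block boundary, via the 64-byte/8-byte and 128-byte/16-byte rules respectively.

#include <stdint.h>
#include <stdio.h>

/* Same padlen rules as atmel_sha_fill_padding() in the hunk above;
 * the appended length field is 8 bytes for the 512-bit-block hashes
 * and 16 bytes for the 1024-bit-block hashes. */
static unsigned int padlen(uint64_t msglen, int sha384_512)
{
	unsigned int index;

	if (sha384_512) {
		index = msglen & 0x7f;
		return (index < 112) ? (112 - index) : ((128 + 112) - index);
	}
	index = msglen & 0x3f;
	return (index < 56) ? (56 - index) : ((64 + 56) - index);
}

int main(void)
{
	/* SHA-256, 100 bytes: index = 36, padlen = 20,
	 * 100 + 20 + 8 = 128 = two 64-byte blocks. */
	printf("sha256 padlen: %u\n", padlen(100, 0));
	/* SHA-512, 100 bytes: index = 100, padlen = 12,
	 * 100 + 12 + 16 = 128 = one 128-byte block. */
	printf("sha512 padlen: %u\n", padlen(100, 1));
	return 0;
}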
@@ -231,13 +271,35 @@ static int atmel_sha_init(struct ahash_request *req)
 	dev_dbg(dd->dev, "init: digest size: %d\n",
 		crypto_ahash_digestsize(tfm));
 
-	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+	switch (crypto_ahash_digestsize(tfm)) {
+	case SHA1_DIGEST_SIZE:
 		ctx->flags |= SHA_FLAGS_SHA1;
-	else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
+		ctx->block_size = SHA1_BLOCK_SIZE;
+		break;
+	case SHA224_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA224;
+		ctx->block_size = SHA224_BLOCK_SIZE;
+		break;
+	case SHA256_DIGEST_SIZE:
 		ctx->flags |= SHA_FLAGS_SHA256;
+		ctx->block_size = SHA256_BLOCK_SIZE;
+		break;
+	case SHA384_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA384;
+		ctx->block_size = SHA384_BLOCK_SIZE;
+		break;
+	case SHA512_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA512;
+		ctx->block_size = SHA512_BLOCK_SIZE;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
 
 	ctx->bufcnt = 0;
-	ctx->digcnt = 0;
+	ctx->digcnt[0] = 0;
+	ctx->digcnt[1] = 0;
 	ctx->buflen = SHA_BUFFER_LEN;
 
 	return 0;
@@ -249,19 +311,28 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
 	u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
 
 	if (likely(dma)) {
-		atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
+		if (!dd->caps.has_dma)
+			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
 		valmr = SHA_MR_MODE_PDC;
-		if (dd->flags & SHA_FLAGS_DUALBUFF)
-			valmr = SHA_MR_DUALBUFF;
+		if (dd->caps.has_dualbuff)
+			valmr |= SHA_MR_DUALBUFF;
 	} else {
 		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
 	}
 
-	if (ctx->flags & SHA_FLAGS_SHA256)
+	if (ctx->flags & SHA_FLAGS_SHA1)
+		valmr |= SHA_MR_ALGO_SHA1;
+	else if (ctx->flags & SHA_FLAGS_SHA224)
+		valmr |= SHA_MR_ALGO_SHA224;
+	else if (ctx->flags & SHA_FLAGS_SHA256)
 		valmr |= SHA_MR_ALGO_SHA256;
+	else if (ctx->flags & SHA_FLAGS_SHA384)
+		valmr |= SHA_MR_ALGO_SHA384;
+	else if (ctx->flags & SHA_FLAGS_SHA512)
+		valmr |= SHA_MR_ALGO_SHA512;
 
 	/* Setting CR_FIRST only for the first iteration */
-	if (!ctx->digcnt)
+	if (!(ctx->digcnt[0] || ctx->digcnt[1]))
 		valcr = SHA_CR_FIRST;
 
 	atmel_sha_write(dd, SHA_CR, valcr);
@@ -275,13 +346,15 @@ static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
 	int count, len32;
 	const u32 *buffer = (const u32 *)buf;
 
-	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
-						ctx->digcnt, length, final);
+	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+		ctx->digcnt[1], ctx->digcnt[0], length, final);
 
 	atmel_sha_write_ctrl(dd, 0);
 
 	/* should be non-zero before next lines to disable clocks later */
-	ctx->digcnt += length;
+	ctx->digcnt[0] += length;
+	if (ctx->digcnt[0] < length)
+		ctx->digcnt[1]++;
 
 	if (final)
 		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
@@ -302,8 +375,8 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 	int len32;
 
-	dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
-						ctx->digcnt, length1, final);
+	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+		ctx->digcnt[1], ctx->digcnt[0], length1, final);
 
 	len32 = DIV_ROUND_UP(length1, sizeof(u32));
 	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
@@ -317,7 +390,9 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 	atmel_sha_write_ctrl(dd, 1);
 
 	/* should be non-zero before next lines to disable clocks later */
-	ctx->digcnt += length1;
+	ctx->digcnt[0] += length1;
+	if (ctx->digcnt[0] < length1)
+		ctx->digcnt[1]++;
 
 	if (final)
 		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
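
The digcnt handling that recurs in these hunks is a 128-bit byte counter split across two u64 halves; wrap-around of the low half is detected with the standard unsigned-overflow comparison. A self-contained sketch of the idiom (userspace C, values illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* As with ctx->digcnt[]: when lo wraps past 2^64, the sum is
	 * smaller than the addend and the high half takes a carry. */
	uint64_t lo = UINT64_MAX - 3, hi = 0;
	uint64_t add = 16;

	lo += add;
	if (lo < add)
		hi++;

	printf("hi=%llu lo=%llu\n",
	       (unsigned long long)hi, (unsigned long long)lo);
	return 0;
}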
@@ -330,6 +405,86 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 	return -EINPROGRESS;
 }
 
+static void atmel_sha_dma_callback(void *data)
+{
+	struct atmel_sha_dev *dd = data;
+
+	/* dma_lch_in - completed - wait DATRDY */
+	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+}
+
+static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	struct dma_async_tx_descriptor	*in_desc;
+	struct scatterlist sg[2];
+
+	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+		ctx->digcnt[1], ctx->digcnt[0], length1, final);
+
+	if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 |
+			SHA_FLAGS_SHA256)) {
+		dd->dma_lch_in.dma_conf.src_maxburst = 16;
+		dd->dma_lch_in.dma_conf.dst_maxburst = 16;
+	} else {
+		dd->dma_lch_in.dma_conf.src_maxburst = 32;
+		dd->dma_lch_in.dma_conf.dst_maxburst = 32;
+	}
+
+	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+
+	if (length2) {
+		sg_init_table(sg, 2);
+		sg_dma_address(&sg[0]) = dma_addr1;
+		sg_dma_len(&sg[0]) = length1;
+		sg_dma_address(&sg[1]) = dma_addr2;
+		sg_dma_len(&sg[1]) = length2;
+		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	} else {
+		sg_init_table(sg, 1);
+		sg_dma_address(&sg[0]) = dma_addr1;
+		sg_dma_len(&sg[0]) = length1;
+		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+	if (!in_desc)
+		return -EINVAL;
+
+	in_desc->callback = atmel_sha_dma_callback;
+	in_desc->callback_param = dd;
+
+	atmel_sha_write_ctrl(dd, 1);
+
+	/* should be non-zero before next lines to disable clocks later */
+	ctx->digcnt[0] += length1;
+	if (ctx->digcnt[0] < length1)
+		ctx->digcnt[1]++;
+
+	if (final)
+		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+	dd->flags |= SHA_FLAGS_DMA_ACTIVE;
+
+	/* Start DMA transfer */
+	dmaengine_submit(in_desc);
+	dma_async_issue_pending(dd->dma_lch_in.chan);
+
+	return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+	if (dd->caps.has_dma)
+		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
+				dma_addr2, length2, final);
+	else
+		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
+				dma_addr2, length2, final);
+}
+
 static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
 {
 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -337,7 +492,6 @@ static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
 
 	atmel_sha_append_sg(ctx);
 	atmel_sha_fill_padding(ctx, 0);
-
 	bufcnt = ctx->bufcnt;
 	ctx->bufcnt = 0;
 
@@ -349,17 +503,17 @@ static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
 		size_t length, int final)
 {
 	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
-				ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
 	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
-				SHA1_BLOCK_SIZE);
+				ctx->block_size);
 		return -EINVAL;
 	}
 
 	ctx->flags &= ~SHA_FLAGS_SG;
 
 	/* next call does not fail... so no unmap in the case of error */
-	return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
+	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
 }
 
 static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
@@ -372,8 +526,8 @@ static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
 
 	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 
-	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
-			 ctx->bufcnt, ctx->digcnt, final);
+	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
+		 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
 
 	if (final)
 		atmel_sha_fill_padding(ctx, 0);
@@ -400,30 +554,25 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
 	if (ctx->bufcnt || ctx->offset)
 		return atmel_sha_update_dma_slow(dd);
 
-	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
-			ctx->digcnt, ctx->bufcnt, ctx->total);
+	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
+		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
 
 	sg = ctx->sg;
 
 	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
 		return atmel_sha_update_dma_slow(dd);
 
-	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
-		/* size is not SHA1_BLOCK_SIZE aligned */
+	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
+		/* size is not ctx->block_size aligned */
 		return atmel_sha_update_dma_slow(dd);
 
 	length = min(ctx->total, sg->length);
 
 	if (sg_is_last(sg)) {
 		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
-			/* not last sg must be SHA1_BLOCK_SIZE aligned */
-			tail = length & (SHA1_BLOCK_SIZE - 1);
+			/* not last sg must be ctx->block_size aligned */
+			tail = length & (ctx->block_size - 1);
 			length -= tail;
-			if (length == 0) {
-				/* offset where to start slow */
-				ctx->offset = length;
-				return atmel_sha_update_dma_slow(dd);
-			}
 		}
 	}
 
@@ -434,7 +583,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
 
 	/* Add padding */
 	if (final) {
-		tail = length & (SHA1_BLOCK_SIZE - 1);
+		tail = length & (ctx->block_size - 1);
 		length -= tail;
 		ctx->total += tail;
 		ctx->offset = length; /* offset where to start slow */
@@ -445,10 +594,10 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
 		atmel_sha_fill_padding(ctx, length);
 
 		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
-			ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
 		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 			dev_err(dd->dev, "dma %u bytes error\n",
-				ctx->buflen + SHA1_BLOCK_SIZE);
+				ctx->buflen + ctx->block_size);
 			return -EINVAL;
 		}
 
@@ -456,7 +605,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
 			ctx->flags &= ~SHA_FLAGS_SG;
 			count = ctx->bufcnt;
 			ctx->bufcnt = 0;
-			return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
+			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
 					0, final);
 		} else {
 			ctx->sg = sg;
@@ -470,7 +619,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
 
 			count = ctx->bufcnt;
 			ctx->bufcnt = 0;
-			return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
+			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
 					length, ctx->dma_addr, count, final);
 		}
 	}
@@ -483,7 +632,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
 	ctx->flags |= SHA_FLAGS_SG;
 
 	/* next call does not fail... so no unmap in the case of error */
-	return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
+	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
 			0, final);
 }
 
@@ -498,12 +647,13 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
 			if (ctx->sg)
 				ctx->offset = 0;
 		}
-		if (ctx->flags & SHA_FLAGS_PAD)
+		if (ctx->flags & SHA_FLAGS_PAD) {
 			dma_unmap_single(dd->dev, ctx->dma_addr,
-				ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
+		}
 	} else {
 		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
-				SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+				ctx->block_size, DMA_TO_DEVICE);
 	}
 
 	return 0;
@@ -515,8 +665,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd)
 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 	int err;
 
-	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
-		ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);
+	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
+		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
 
 	if (ctx->flags & SHA_FLAGS_CPU)
 		err = atmel_sha_update_cpu(dd);
@@ -524,8 +674,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd)
 		err = atmel_sha_update_dma_start(dd);
 
 	/* wait for dma completion before can take more data */
-	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
-			err, ctx->digcnt);
+	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
+			err, ctx->digcnt[1], ctx->digcnt[0]);
 
 	return err;
 }
@@ -562,12 +712,21 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
 	u32 *hash = (u32 *)ctx->digest;
 	int i;
 
-	if (likely(ctx->flags & SHA_FLAGS_SHA1))
+	if (ctx->flags & SHA_FLAGS_SHA1)
 		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
 			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-	else
+	else if (ctx->flags & SHA_FLAGS_SHA224)
+		for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
+			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+	else if (ctx->flags & SHA_FLAGS_SHA256)
 		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
 			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+	else if (ctx->flags & SHA_FLAGS_SHA384)
+		for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
+			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+	else
+		for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
+			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
 }
 
 static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -577,10 +736,16 @@ static void atmel_sha_copy_ready_hash(struct ahash_request *req)
 	if (!req->result)
 		return;
 
-	if (likely(ctx->flags & SHA_FLAGS_SHA1))
+	if (ctx->flags & SHA_FLAGS_SHA1)
 		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
-	else
+	else if (ctx->flags & SHA_FLAGS_SHA224)
+		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
+	else if (ctx->flags & SHA_FLAGS_SHA256)
 		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+	else if (ctx->flags & SHA_FLAGS_SHA384)
+		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
+	else
+		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
 }
 
 static int atmel_sha_finish(struct ahash_request *req)
@@ -589,11 +754,11 @@ static int atmel_sha_finish(struct ahash_request *req)
 	struct atmel_sha_dev *dd = ctx->dd;
 	int err = 0;
 
-	if (ctx->digcnt)
+	if (ctx->digcnt[0] || ctx->digcnt[1])
 		atmel_sha_copy_ready_hash(req);
 
-	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt,
-		ctx->bufcnt);
+	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
+		ctx->digcnt[0], ctx->bufcnt);
 
 	return err;
 }
@@ -628,9 +793,8 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
 {
 	clk_prepare_enable(dd->iclk);
 
-	if (SHA_FLAGS_INIT & dd->flags) {
+	if (!(SHA_FLAGS_INIT & dd->flags)) {
 		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
-		atmel_sha_dualbuff_test(dd);
 		dd->flags |= SHA_FLAGS_INIT;
 		dd->err = 0;
 	}
@@ -638,6 +802,23 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
 	return 0;
 }
 
+static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
+{
+	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+{
+	atmel_sha_hw_init(dd);
+
+	dd->hw_version = atmel_sha_get_version(dd);
+
+	dev_info(dd->dev,
+			"version: 0x%x\n", dd->hw_version);
+
+	clk_disable_unprepare(dd->iclk);
+}
+
 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
 				  struct ahash_request *req)
 {
@@ -682,10 +863,9 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
 
 	if (ctx->op == SHA_OP_UPDATE) {
 		err = atmel_sha_update_req(dd);
-		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
+		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
 			/* no final() after finup() */
 			err = atmel_sha_final_req(dd);
-		}
 	} else if (ctx->op == SHA_OP_FINAL) {
 		err = atmel_sha_final_req(dd);
 	}
@@ -808,7 +988,7 @@ static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
 	}
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct atmel_sha_reqctx) +
-				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+				 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
 
 	return 0;
 }
@@ -826,7 +1006,7 @@ static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
 	tctx->fallback = NULL;
 }
 
-static struct ahash_alg sha_algs[] = {
+static struct ahash_alg sha_1_256_algs[] = {
 {
 	.init		= atmel_sha_init,
 	.update		= atmel_sha_update,
@@ -875,6 +1055,79 @@ static struct ahash_alg sha_algs[] = {
 },
 };
 
+static struct ahash_alg sha_224_alg = {
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.halg = {
+		.digestsize	= SHA224_DIGEST_SIZE,
+		.base	= {
+			.cra_name		= "sha224",
+			.cra_driver_name	= "atmel-sha224",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize		= SHA224_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+			.cra_exit		= atmel_sha_cra_exit,
+		}
+	}
+};
+
+static struct ahash_alg sha_384_512_algs[] = {
+{
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.halg = {
+		.digestsize	= SHA384_DIGEST_SIZE,
+		.base	= {
+			.cra_name		= "sha384",
+			.cra_driver_name	= "atmel-sha384",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize		= SHA384_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0x3,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+			.cra_exit		= atmel_sha_cra_exit,
+		}
+	}
+},
+{
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.halg = {
+		.digestsize	= SHA512_DIGEST_SIZE,
+		.base	= {
+			.cra_name		= "sha512",
+			.cra_driver_name	= "atmel-sha512",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize		= SHA512_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0x3,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+			.cra_exit		= atmel_sha_cra_exit,
+		}
+	}
+},
+};
+
 static void atmel_sha_done_task(unsigned long data)
 {
 	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
@@ -941,32 +1194,142 @@ static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
-		crypto_unregister_ahash(&sha_algs[i]);
+	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
+		crypto_unregister_ahash(&sha_1_256_algs[i]);
+
+	if (dd->caps.has_sha224)
+		crypto_unregister_ahash(&sha_224_alg);
+
+	if (dd->caps.has_sha_384_512) {
+		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
+			crypto_unregister_ahash(&sha_384_512_algs[i]);
+	}
 }
 
 static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
 {
 	int err, i, j;
 
-	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
-		err = crypto_register_ahash(&sha_algs[i]);
+	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+		err = crypto_register_ahash(&sha_1_256_algs[i]);
 		if (err)
-			goto err_sha_algs;
+			goto err_sha_1_256_algs;
+	}
+
+	if (dd->caps.has_sha224) {
+		err = crypto_register_ahash(&sha_224_alg);
+		if (err)
+			goto err_sha_224_algs;
+	}
+
+	if (dd->caps.has_sha_384_512) {
+		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+			err = crypto_register_ahash(&sha_384_512_algs[i]);
+			if (err)
+				goto err_sha_384_512_algs;
+		}
 	}
 
 	return 0;
 
-err_sha_algs:
+err_sha_384_512_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_ahash(&sha_384_512_algs[j]);
+	crypto_unregister_ahash(&sha_224_alg);
+err_sha_224_algs:
+	i = ARRAY_SIZE(sha_1_256_algs);
+err_sha_1_256_algs:
 	for (j = 0; j < i; j++)
-		crypto_unregister_ahash(&sha_algs[j]);
+		crypto_unregister_ahash(&sha_1_256_algs[j]);
 
 	return err;
 }
 
+static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+{
+	struct at_dma_slave	*sl = slave;
+
+	if (sl && sl->dma_dev == chan->device->dev) {
+		chan->private = sl;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
+				struct crypto_platform_data *pdata)
+{
+	int err = -ENOMEM;
+	dma_cap_mask_t mask_in;
+
+	if (pdata && pdata->dma_slave->rxdata.dma_dev) {
+		/* Try to grab DMA channel */
+		dma_cap_zero(mask_in);
+		dma_cap_set(DMA_SLAVE, mask_in);
+
+		dd->dma_lch_in.chan = dma_request_channel(mask_in,
+				atmel_sha_filter, &pdata->dma_slave->rxdata);
+
+		if (!dd->dma_lch_in.chan)
+			return err;
+
+		dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+		dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+			SHA_REG_DIN(0);
+		dd->dma_lch_in.dma_conf.src_maxburst = 1;
+		dd->dma_lch_in.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_in.dma_conf.device_fc = false;
+
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
+{
+	dma_release_channel(dd->dma_lch_in.chan);
+}
+
+static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
+{
+
+	dd->caps.has_dma = 0;
+	dd->caps.has_dualbuff = 0;
+	dd->caps.has_sha224 = 0;
+	dd->caps.has_sha_384_512 = 0;
+
+	/* keep only major version number */
+	switch (dd->hw_version & 0xff0) {
+	case 0x410:
+		dd->caps.has_dma = 1;
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_sha224 = 1;
+		dd->caps.has_sha_384_512 = 1;
+		break;
+	case 0x400:
+		dd->caps.has_dma = 1;
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_sha224 = 1;
+		break;
+	case 0x320:
+		break;
+	default:
+		dev_warn(dd->dev,
+				"Unmanaged sha version, set minimum capabilities\n");
+		break;
+	}
+}
+
 static int atmel_sha_probe(struct platform_device *pdev)
 {
 	struct atmel_sha_dev *sha_dd;
+	struct crypto_platform_data	*pdata;
 	struct device *dev = &pdev->dev;
 	struct resource *sha_res;
 	unsigned long sha_phys_size;
@@ -1018,7 +1381,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
 	}
 
 	/* Initializing the clock */
-	sha_dd->iclk = clk_get(&pdev->dev, NULL);
+	sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
 	if (IS_ERR(sha_dd->iclk)) {
 		dev_err(dev, "clock intialization failed.\n");
 		err = PTR_ERR(sha_dd->iclk);
@@ -1032,6 +1395,22 @@ static int atmel_sha_probe(struct platform_device *pdev)
 		goto sha_io_err;
 	}
 
+	atmel_sha_hw_version_init(sha_dd);
+
+	atmel_sha_get_cap(sha_dd);
+
+	if (sha_dd->caps.has_dma) {
+		pdata = pdev->dev.platform_data;
+		if (!pdata) {
+			dev_err(&pdev->dev, "platform data not available\n");
+			err = -ENXIO;
+			goto err_pdata;
+		}
+		err = atmel_sha_dma_init(sha_dd, pdata);
+		if (err)
+			goto err_sha_dma;
+	}
+
 	spin_lock(&atmel_sha.lock);
 	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
 	spin_unlock(&atmel_sha.lock);
@@ -1048,6 +1427,10 @@ err_algs:
 	spin_lock(&atmel_sha.lock);
 	list_del(&sha_dd->list);
 	spin_unlock(&atmel_sha.lock);
+	if (sha_dd->caps.has_dma)
+		atmel_sha_dma_cleanup(sha_dd);
+err_sha_dma:
+err_pdata:
 	iounmap(sha_dd->io_base);
 sha_io_err:
 	clk_put(sha_dd->iclk);
@@ -1078,6 +1461,9 @@ static int atmel_sha_remove(struct platform_device *pdev)
 
 	tasklet_kill(&sha_dd->done_task);
 
+	if (sha_dd->caps.has_dma)
+		atmel_sha_dma_cleanup(sha_dd);
+
 	iounmap(sha_dd->io_base);
 
 	clk_put(sha_dd->iclk);
@@ -1102,6 +1488,6 @@ static struct platform_driver atmel_sha_driver = {
 
 module_platform_driver(atmel_sha_driver);
 
-MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
+MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");