author		Nicolas Royer <nicolas@eukrea.com>	2013-02-20 11:10:24 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2013-03-10 04:46:42 -0400
commit		cadc4ab8f6f73719ef0e124320cdd210d1c9ff3e (patch)
tree		9c0da973151be6e1e10d88376735a81859a55346 /drivers/crypto
parent		6150f3bc0b4f94f0eea3e32b4e7462025e4bd972 (diff)
crypto: atmel-aes - add support for latest release of the IP (0x130)
Updates from previous IP release (0x120):
- add cfb64 support
- add DMA double input buffer support
Signed-off-by: Nicolas Royer <nicolas@eukrea.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Eric Bénard <eric@eukrea.com>
Tested-by: Eric Bénard <eric@eukrea.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
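
With this patch applied, a "cfb64(aes)" ablkcipher can be requested through the regular kernel crypto API on 0x130 hardware. A minimal usage sketch, assuming the 3.9-era ablkcipher interface (key, IV, and scatterlists are hypothetical placeholders; error and async-completion handling omitted):

	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;

	tfm = crypto_alloc_ablkcipher("cfb64(aes)", 0, 0); /* may resolve to atmel-cfb64-aes */
	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	/* nbytes must be a multiple of CFB64_BLOCK_SIZE (8) */
	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
	crypto_ablkcipher_encrypt(req);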
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/atmel-aes.c	471
1 file changed, 353 insertions(+), 118 deletions(-)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6f22ba51f969..c1efd910d97b 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -38,7 +38,7 @@
 #include <crypto/aes.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
-#include <linux/platform_data/atmel-aes.h>
+#include <linux/platform_data/crypto-atmel.h>
 #include "atmel-aes-regs.h"
 
 #define CFB8_BLOCK_SIZE 1
@@ -47,7 +47,7 @@
 #define CFB64_BLOCK_SIZE 8
 
 /* AES flags */
-#define AES_FLAGS_MODE_MASK 0x01ff
+#define AES_FLAGS_MODE_MASK 0x03ff
 #define AES_FLAGS_ENCRYPT BIT(0)
 #define AES_FLAGS_CBC BIT(1)
 #define AES_FLAGS_CFB BIT(2)
@@ -55,21 +55,26 @@
 #define AES_FLAGS_CFB16 BIT(4)
 #define AES_FLAGS_CFB32 BIT(5)
 #define AES_FLAGS_CFB64 BIT(6)
-#define AES_FLAGS_OFB BIT(7)
-#define AES_FLAGS_CTR BIT(8)
+#define AES_FLAGS_CFB128 BIT(7)
+#define AES_FLAGS_OFB BIT(8)
+#define AES_FLAGS_CTR BIT(9)
 
 #define AES_FLAGS_INIT BIT(16)
 #define AES_FLAGS_DMA BIT(17)
 #define AES_FLAGS_BUSY BIT(18)
+#define AES_FLAGS_FAST BIT(19)
 
-#define AES_FLAGS_DUALBUFF BIT(24)
-
-#define ATMEL_AES_QUEUE_LENGTH 1
-#define ATMEL_AES_CACHE_SIZE 0
+#define ATMEL_AES_QUEUE_LENGTH 50
 
 #define ATMEL_AES_DMA_THRESHOLD 16
 
 
+struct atmel_aes_caps {
+	bool has_dualbuff;
+	bool has_cfb64;
+	u32 max_burst_size;
+};
+
 struct atmel_aes_dev;
 
 struct atmel_aes_ctx {
@@ -77,6 +82,8 @@ struct atmel_aes_ctx {
 
 	int keylen;
 	u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+
+	u16 block_size;
 };
 
 struct atmel_aes_reqctx {
@@ -112,20 +119,27 @@ struct atmel_aes_dev {
 
 	struct scatterlist *in_sg;
 	unsigned int nb_in_sg;
-
+	size_t in_offset;
 	struct scatterlist *out_sg;
 	unsigned int nb_out_sg;
+	size_t out_offset;
 
 	size_t bufcnt;
+	size_t buflen;
+	size_t dma_size;
 
-	u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
+	void *buf_in;
 	int dma_in;
+	dma_addr_t dma_addr_in;
 	struct atmel_aes_dma dma_lch_in;
 
-	u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
+	void *buf_out;
 	int dma_out;
+	dma_addr_t dma_addr_out;
 	struct atmel_aes_dma dma_lch_out;
 
+	struct atmel_aes_caps caps;
+
 	u32 hw_version;
 };
 
@@ -165,6 +179,37 @@ static int atmel_aes_sg_length(struct ablkcipher_request *req,
 	return sg_nb;
 }
 
+static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
+			void *buf, size_t buflen, size_t total, int out)
+{
+	unsigned int count, off = 0;
+
+	while (buflen && total) {
+		count = min((*sg)->length - *offset, total);
+		count = min(count, buflen);
+
+		if (!count)
+			return off;
+
+		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+
+		off += count;
+		buflen -= count;
+		*offset += count;
+		total -= count;
+
+		if (*offset == (*sg)->length) {
+			*sg = sg_next(*sg);
+			if (*sg)
+				*offset = 0;
+			else
+				total = 0;
+		}
+	}
+
+	return off;
+}
+
 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
 {
 	return readl_relaxed(dd->io_base + offset);
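The atmel_aes_sg_copy() helper added above linearizes an arbitrary scatterlist into a bounce buffer (out = 0) or scatters results back out of it (out = 1), advancing *sg and *offset so that repeated calls resume where the previous one stopped. The slow path later in this patch drives it roughly like this (a sketch using names from this patch):

	/* gather: fill the DMA bounce buffer from the request's source sglist */
	count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
			dd->buf_in, dd->buflen, dd->total, 0);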
@@ -190,14 +235,6 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
 		atmel_aes_write(dd, offset, *value);
 }
 
-static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
-{
-	atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
-
-	if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
-		dd->flags |= AES_FLAGS_DUALBUFF;
-}
-
 static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
 {
 	struct atmel_aes_dev *aes_dd = NULL;
@@ -225,7 +262,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 
 	if (!(dd->flags & AES_FLAGS_INIT)) {
 		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
-		atmel_aes_dualbuff_test(dd);
+		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
 		dd->flags |= AES_FLAGS_INIT;
 		dd->err = 0;
 	}
@@ -233,11 +270,19 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 	return 0;
 }
 
+static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
+{
+	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
+}
+
 static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 {
 	atmel_aes_hw_init(dd);
 
-	dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
+	dd->hw_version = atmel_aes_get_version(dd);
+
+	dev_info(dd->dev,
+			"version: 0x%x\n", dd->hw_version);
 
 	clk_disable_unprepare(dd->iclk);
 }
@@ -260,50 +305,77 @@ static void atmel_aes_dma_callback(void *data)
 	tasklet_schedule(&dd->done_task);
 }
 
-static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
+static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
+		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
 {
+	struct scatterlist sg[2];
 	struct dma_async_tx_descriptor *in_desc, *out_desc;
-	int nb_dma_sg_in, nb_dma_sg_out;
 
-	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
-	if (!dd->nb_in_sg)
-		goto exit_err;
+	dd->dma_size = length;
 
-	nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
-			DMA_TO_DEVICE);
-	if (!nb_dma_sg_in)
-		goto exit_err;
+	if (!(dd->flags & AES_FLAGS_FAST)) {
+		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+				DMA_TO_DEVICE);
+	}
 
-	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
-				nb_dma_sg_in, DMA_MEM_TO_DEV,
-				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (dd->flags & AES_FLAGS_CFB8) {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_1_BYTE;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_1_BYTE;
+	} else if (dd->flags & AES_FLAGS_CFB16) {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_2_BYTES;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_2_BYTES;
+	} else {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+	}
 
-	if (!in_desc)
-		goto unmap_in;
+	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
+			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
+		dd->dma_lch_in.dma_conf.src_maxburst = 1;
+		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_out.dma_conf.src_maxburst = 1;
+		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+	} else {
+		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+	}
 
-	/* callback not needed */
+	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
 
-	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
-	if (!dd->nb_out_sg)
-		goto unmap_in;
+	dd->flags |= AES_FLAGS_DMA;
 
-	nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
-			DMA_FROM_DEVICE);
-	if (!nb_dma_sg_out)
-		goto unmap_out;
+	sg_init_table(&sg[0], 1);
+	sg_dma_address(&sg[0]) = dma_addr_in;
+	sg_dma_len(&sg[0]) = length;
 
-	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
-				nb_dma_sg_out, DMA_DEV_TO_MEM,
-				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	sg_init_table(&sg[1], 1);
+	sg_dma_address(&sg[1]) = dma_addr_out;
+	sg_dma_len(&sg[1]) = length;
+
+	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+				1, DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!in_desc)
+		return -EINVAL;
 
+	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+				1, DMA_DEV_TO_MEM,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!out_desc)
-		goto unmap_out;
+		return -EINVAL;
 
 	out_desc->callback = atmel_aes_dma_callback;
 	out_desc->callback_param = dd;
 
-	dd->total -= dd->req->nbytes;
-
 	dmaengine_submit(out_desc);
 	dma_async_issue_pending(dd->dma_lch_out.chan);
 
@@ -311,15 +383,6 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
 	dma_async_issue_pending(dd->dma_lch_in.chan);
 
 	return 0;
-
-unmap_out:
-	dma_unmap_sg(dd->dev, dd->out_sg,
-		dd->nb_out_sg, DMA_FROM_DEVICE);
-unmap_in:
-	dma_unmap_sg(dd->dev, dd->in_sg,
-		dd->nb_in_sg, DMA_TO_DEVICE);
-exit_err:
-	return -EINVAL;
 }
 
 static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
@@ -352,30 +415,66 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
 
 static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
 {
-	int err;
+	int err, fast = 0, in, out;
+	size_t count;
+	dma_addr_t addr_in, addr_out;
+
+	if ((!dd->in_offset) && (!dd->out_offset)) {
+		/* check for alignment */
+		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
+		fast = in && out;
+
+		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+			fast = 0;
+	}
+
+
+	if (fast) {
+		count = min(dd->total, sg_dma_len(dd->in_sg));
+		count = min(count, sg_dma_len(dd->out_sg));
+
+		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, dd->out_sg, 1,
+				DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			dma_unmap_sg(dd->dev, dd->in_sg, 1,
+				DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+
+		addr_in = sg_dma_address(dd->in_sg);
+		addr_out = sg_dma_address(dd->out_sg);
+
+		dd->flags |= AES_FLAGS_FAST;
 
-	if (dd->flags & AES_FLAGS_CFB8) {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_1_BYTE;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_1_BYTE;
-	} else if (dd->flags & AES_FLAGS_CFB16) {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_2_BYTES;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_2_BYTES;
 	} else {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		/* use cache buffers */
+		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
+				dd->buf_in, dd->buflen, dd->total, 0);
+
+		addr_in = dd->dma_addr_in;
+		addr_out = dd->dma_addr_out;
+
+		dd->flags &= ~AES_FLAGS_FAST;
 	}
 
-	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
-	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+	dd->total -= count;
 
-	dd->flags |= AES_FLAGS_DMA;
-	err = atmel_aes_crypt_dma(dd);
+	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
+
+	if (err && (dd->flags & AES_FLAGS_FAST)) {
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+	}
 
 	return err;
 }
@@ -410,6 +509,8 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
 			valmr |= AES_MR_CFBS_32b;
 		else if (dd->flags & AES_FLAGS_CFB64)
 			valmr |= AES_MR_CFBS_64b;
+		else if (dd->flags & AES_FLAGS_CFB128)
+			valmr |= AES_MR_CFBS_128b;
 	} else if (dd->flags & AES_FLAGS_OFB) {
 		valmr |= AES_MR_OPMOD_OFB;
 	} else if (dd->flags & AES_FLAGS_CTR) {
@@ -423,7 +524,7 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
 
 	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
 		valmr |= AES_MR_SMOD_IDATAR0;
-		if (dd->flags & AES_FLAGS_DUALBUFF)
+		if (dd->caps.has_dualbuff)
 			valmr |= AES_MR_DUALBUFF;
 	} else {
 		valmr |= AES_MR_SMOD_AUTO;
@@ -477,7 +578,9 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 	/* assign new request to device */
 	dd->req = req;
 	dd->total = req->nbytes;
+	dd->in_offset = 0;
 	dd->in_sg = req->src;
+	dd->out_offset = 0;
 	dd->out_sg = req->dst;
 
 	rctx = ablkcipher_request_ctx(req);
@@ -506,18 +609,86 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
 {
 	int err = -EINVAL;
+	size_t count;
 
 	if (dd->flags & AES_FLAGS_DMA) {
-		dma_unmap_sg(dd->dev, dd->out_sg,
-			dd->nb_out_sg, DMA_FROM_DEVICE);
-		dma_unmap_sg(dd->dev, dd->in_sg,
-			dd->nb_in_sg, DMA_TO_DEVICE);
 		err = 0;
+		if (dd->flags & AES_FLAGS_FAST) {
+			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		} else {
+			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+				dd->dma_size, DMA_FROM_DEVICE);
+
+			/* copy data */
+			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
+				dd->buf_out, dd->buflen, dd->dma_size, 1);
+			if (count != dd->dma_size) {
+				err = -EINVAL;
+				pr_err("not all data converted: %u\n", count);
+			}
+		}
 	}
 
 	return err;
 }
 
+
+static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+{
+	int err = -ENOMEM;
+
+	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
+	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
+	dd->buflen = PAGE_SIZE;
+	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+	if (!dd->buf_in || !dd->buf_out) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		goto err_alloc;
+	}
+
+	/* MAP here */
+	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
+					dd->buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_in;
+	}
+
+	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
+					dd->buflen, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_out;
+	}
+
+	return 0;
+
+err_map_out:
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+		DMA_TO_DEVICE);
+err_map_in:
+	free_page((unsigned long)dd->buf_out);
+	free_page((unsigned long)dd->buf_in);
+err_alloc:
+	if (err)
+		pr_err("error: %d\n", err);
+	return err;
+}
+
+static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+{
+	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+		DMA_FROM_DEVICE);
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+		DMA_TO_DEVICE);
+	free_page((unsigned long)dd->buf_out);
+	free_page((unsigned long)dd->buf_in);
+}
+
 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
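The atmel_aes_buff_init() added above follows the usual streaming-DMA bounce-buffer pattern: each page is mapped once at probe time, and the data path only transfers ownership back and forth with sync calls instead of remapping per request. A condensed sketch of that pattern (standard kernel DMA API; buf, dev, src, and len are placeholders):

	void *buf = (void *)__get_free_pages(GFP_KERNEL, 0);
	dma_addr_t dma = dma_map_single(dev, buf, PAGE_SIZE, DMA_TO_DEVICE); /* once */

	/* per request: CPU fills the buffer, then hands it to the device */
	memcpy(buf, src, len);
	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);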
@@ -525,9 +696,30 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct atmel_aes_dev *dd;
 
-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-		pr_err("request size is not exact amount of AES blocks\n");
-		return -EINVAL;
+	if (mode & AES_FLAGS_CFB8) {
+		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB8 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB8_BLOCK_SIZE;
+	} else if (mode & AES_FLAGS_CFB16) {
+		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB16 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB16_BLOCK_SIZE;
+	} else if (mode & AES_FLAGS_CFB32) {
+		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB32 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB32_BLOCK_SIZE;
+	} else {
+		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of AES blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = AES_BLOCK_SIZE;
 	}
 
 	dd = atmel_aes_find_dev(ctx);
@@ -551,14 +743,12 @@ static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
 	}
 }
 
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
+		struct crypto_platform_data *pdata)
 {
 	int err = -ENOMEM;
-	struct aes_platform_data *pdata;
 	dma_cap_mask_t mask_in, mask_out;
 
-	pdata = dd->dev->platform_data;
-
 	if (pdata && pdata->dma_slave->txdata.dma_dev &&
 	    pdata->dma_slave->rxdata.dma_dev) {
 
@@ -568,28 +758,38 @@
 
 		dd->dma_lch_in.chan = dma_request_channel(mask_in,
 				atmel_aes_filter, &pdata->dma_slave->rxdata);
+
 		if (!dd->dma_lch_in.chan)
 			goto err_dma_in;
 
 		dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
 		dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
 			AES_IDATAR(0);
-		dd->dma_lch_in.dma_conf.src_maxburst = 1;
-		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_in.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
 		dd->dma_lch_in.dma_conf.device_fc = false;
 
 		dma_cap_zero(mask_out);
 		dma_cap_set(DMA_SLAVE, mask_out);
 		dd->dma_lch_out.chan = dma_request_channel(mask_out,
 				atmel_aes_filter, &pdata->dma_slave->txdata);
+
 		if (!dd->dma_lch_out.chan)
 			goto err_dma_out;
 
 		dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
 		dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
 			AES_ODATAR(0);
-		dd->dma_lch_out.dma_conf.src_maxburst = 1;
-		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+		dd->dma_lch_out.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
 		dd->dma_lch_out.dma_conf.device_fc = false;
 
 		return 0;
@@ -665,13 +865,13 @@ static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
 {
 	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
+		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
 }
 
 static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
 {
 	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB);
+		AES_FLAGS_CFB | AES_FLAGS_CFB128);
 }
 
 static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
@@ -753,7 +953,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-	.cra_alignmask = 0x0,
+	.cra_alignmask = 0xf,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_aes_cra_init,
@@ -773,7 +973,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-	.cra_alignmask = 0x0,
+	.cra_alignmask = 0xf,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_aes_cra_init,
@@ -794,7 +994,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-	.cra_alignmask = 0x0,
+	.cra_alignmask = 0xf,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_aes_cra_init,
@@ -815,7 +1015,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-	.cra_alignmask = 0x0,
+	.cra_alignmask = 0xf,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_aes_cra_init,
@@ -836,7 +1036,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = CFB32_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-	.cra_alignmask = 0x0,
+	.cra_alignmask = 0x3,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_aes_cra_init,
@@ -857,7 +1057,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = CFB16_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-	.cra_alignmask = 0x0,
+	.cra_alignmask = 0x1,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_aes_cra_init,
@@ -899,7 +1099,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-	.cra_alignmask = 0x0,
+	.cra_alignmask = 0xf,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_aes_cra_init,
@@ -915,15 +1115,14 @@ static struct crypto_alg aes_algs[] = {
 },
 };
 
-static struct crypto_alg aes_cfb64_alg[] = {
-{
+static struct crypto_alg aes_cfb64_alg = {
 	.cra_name = "cfb64(aes)",
 	.cra_driver_name = "atmel-cfb64-aes",
 	.cra_priority = 100,
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = CFB64_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-	.cra_alignmask = 0x0,
+	.cra_alignmask = 0x7,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_aes_cra_init,
@@ -936,7 +1135,6 @@ static struct crypto_alg aes_cfb64_alg[] = {
 		.encrypt = atmel_aes_cfb64_encrypt,
 		.decrypt = atmel_aes_cfb64_decrypt,
 	}
-},
 };
 
 static void atmel_aes_queue_task(unsigned long data)
@@ -969,7 +1167,14 @@ static void atmel_aes_done_task(unsigned long data)
 	err = dd->err ? : err;
 
 	if (dd->total && !err) {
-		err = atmel_aes_crypt_dma_start(dd);
+		if (dd->flags & AES_FLAGS_FAST) {
+			dd->in_sg = sg_next(dd->in_sg);
+			dd->out_sg = sg_next(dd->out_sg);
+			if (!dd->in_sg || !dd->out_sg)
+				err = -EINVAL;
+		}
+		if (!err)
+			err = atmel_aes_crypt_dma_start(dd);
 		if (!err)
 			return; /* DMA started. Not fininishing. */
 	}
@@ -1003,8 +1208,8 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 
 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
 		crypto_unregister_alg(&aes_algs[i]);
-	if (dd->hw_version >= 0x130)
-		crypto_unregister_alg(&aes_cfb64_alg[0]);
+	if (dd->caps.has_cfb64)
+		crypto_unregister_alg(&aes_cfb64_alg);
 }
 
 static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -1017,10 +1222,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
 			goto err_aes_algs;
 	}
 
-	atmel_aes_hw_version_init(dd);
-
-	if (dd->hw_version >= 0x130) {
-		err = crypto_register_alg(&aes_cfb64_alg[0]);
+	if (dd->caps.has_cfb64) {
+		err = crypto_register_alg(&aes_cfb64_alg);
 		if (err)
 			goto err_aes_cfb64_alg;
 	}
@@ -1036,10 +1239,32 @@ err_aes_algs:
 	return err;
 }
 
+static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
+{
+	dd->caps.has_dualbuff = 0;
+	dd->caps.has_cfb64 = 0;
+	dd->caps.max_burst_size = 1;
+
+	/* keep only major version number */
+	switch (dd->hw_version & 0xff0) {
+	case 0x130:
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_cfb64 = 1;
+		dd->caps.max_burst_size = 4;
+		break;
+	case 0x120:
+		break;
+	default:
+		dev_warn(dd->dev,
+				"Unmanaged aes version, set minimum capabilities\n");
+		break;
+	}
+}
+
 static int atmel_aes_probe(struct platform_device *pdev)
 {
 	struct atmel_aes_dev *aes_dd;
-	struct aes_platform_data *pdata;
+	struct crypto_platform_data *pdata;
 	struct device *dev = &pdev->dev;
 	struct resource *aes_res;
 	unsigned long aes_phys_size;
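The atmel_aes_get_cap() added above masks off the minor revision (hw_version & 0xff0), so any 0x13x release is treated as 0x130, and unknown revisions fall back to minimum capabilities. Supporting a newer IP would just add a case; a hypothetical sketch, not part of this patch:

	case 0x140: /* hypothetical future revision */
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;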
@@ -1099,7 +1324,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
 	}
 
 	/* Initializing the clock */
-	aes_dd->iclk = clk_get(&pdev->dev, NULL);
+	aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
 	if (IS_ERR(aes_dd->iclk)) {
 		dev_err(dev, "clock intialization failed.\n");
 		err = PTR_ERR(aes_dd->iclk);
@@ -1113,7 +1338,15 @@ static int atmel_aes_probe(struct platform_device *pdev)
 		goto aes_io_err;
 	}
 
-	err = atmel_aes_dma_init(aes_dd);
+	atmel_aes_hw_version_init(aes_dd);
+
+	atmel_aes_get_cap(aes_dd);
+
+	err = atmel_aes_buff_init(aes_dd);
+	if (err)
+		goto err_aes_buff;
+
+	err = atmel_aes_dma_init(aes_dd, pdata);
 	if (err)
 		goto err_aes_dma;
 
@@ -1135,6 +1368,8 @@ err_algs:
 	spin_unlock(&atmel_aes.lock);
 	atmel_aes_dma_cleanup(aes_dd);
 err_aes_dma:
+	atmel_aes_buff_cleanup(aes_dd);
+err_aes_buff:
 	iounmap(aes_dd->io_base);
 aes_io_err:
 	clk_put(aes_dd->iclk);