Diffstat (limited to 'drivers/crypto/omap-aes.c')
-rw-r--r--   drivers/crypto/omap-aes.c | 960
1 file changed, 960 insertions(+), 0 deletions(-)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
new file mode 100644
index 000000000000..5b970d9e9956
--- /dev/null
+++ b/drivers/crypto/omap-aes.c
@@ -0,0 +1,960 @@
/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#include <plat/cpu.h>
#include <plat/dma.h>

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)    (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

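/*
 * For example, FLD_MASK(4, 3) == 0x18 (a mask covering bits 4:3) and
 * FLD_VAL(2, 4, 3) == 0x10 (the value 2 placed into bits 4:3).
 */
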
#define AES_REG_KEY(x)                  (0x1C - ((x ^ 0x01) * 0x04))
#define AES_REG_IV(x)                   (0x20 + ((x) * 0x04))

#define AES_REG_CTRL                    0x30
#define AES_REG_CTRL_CTR_WIDTH          (1 << 7)
#define AES_REG_CTRL_CTR                (1 << 6)
#define AES_REG_CTRL_CBC                (1 << 5)
#define AES_REG_CTRL_KEY_SIZE           (3 << 3)
#define AES_REG_CTRL_DIRECTION          (1 << 2)
#define AES_REG_CTRL_INPUT_READY        (1 << 1)
#define AES_REG_CTRL_OUTPUT_READY       (1 << 0)

#define AES_REG_DATA                    0x34
#define AES_REG_DATA_N(x)               (0x34 + ((x) * 0x04))

#define AES_REG_REV                     0x44
#define AES_REG_REV_MAJOR               0xF0
#define AES_REG_REV_MINOR               0x0F

#define AES_REG_MASK                    0x48
#define AES_REG_MASK_SIDLE              (1 << 6)
#define AES_REG_MASK_START              (1 << 5)
#define AES_REG_MASK_DMA_OUT_EN         (1 << 3)
#define AES_REG_MASK_DMA_IN_EN          (1 << 2)
#define AES_REG_MASK_SOFTRESET          (1 << 1)
#define AES_REG_AUTOIDLE                (1 << 0)

#define AES_REG_SYSSTATUS               0x4C
#define AES_REG_SYSSTATUS_RESETDONE     (1 << 0)

#define DEFAULT_TIMEOUT                 (5*HZ)

#define FLAGS_MODE_MASK                 0x000f
#define FLAGS_ENCRYPT                   BIT(0)
#define FLAGS_CBC                       BIT(1)
#define FLAGS_GIV                       BIT(2)

#define FLAGS_INIT                      BIT(4)
#define FLAGS_FAST                      BIT(5)
#define FLAGS_BUSY                      BIT(6)

struct omap_aes_ctx {
        struct omap_aes_dev *dd;

        int             keylen;
        u32             key[AES_KEYSIZE_256 / sizeof(u32)];
        unsigned long   flags;
};

struct omap_aes_reqctx {
        unsigned long mode;
};

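/*
 * OMAP_AES_QUEUE_LENGTH caps the software crypto queue at one queued
 * request; OMAP_AES_CACHE_SIZE is the page-allocation order of the bounce
 * buffers, so each buffer is PAGE_SIZE << OMAP_AES_CACHE_SIZE bytes long.
 */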
#define OMAP_AES_QUEUE_LENGTH   1
#define OMAP_AES_CACHE_SIZE     0

struct omap_aes_dev {
        struct list_head        list;
        unsigned long           phys_base;
        void __iomem            *io_base;
        struct clk              *iclk;
        struct omap_aes_ctx     *ctx;
        struct device           *dev;
        unsigned long           flags;
        int                     err;

        spinlock_t              lock;
        struct crypto_queue     queue;

        struct tasklet_struct   done_task;
        struct tasklet_struct   queue_task;

        struct ablkcipher_request       *req;
        size_t                          total;
        struct scatterlist              *in_sg;
        size_t                          in_offset;
        struct scatterlist              *out_sg;
        size_t                          out_offset;

        size_t                  buflen;
        void                    *buf_in;
        size_t                  dma_size;
        int                     dma_in;
        int                     dma_lch_in;
        dma_addr_t              dma_addr_in;
        void                    *buf_out;
        int                     dma_out;
        int                     dma_lch_out;
        dma_addr_t              dma_addr_out;
};

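/*
 * Bounce-buffer DMA state: buf_in/buf_out are pre-mapped, block-aligned
 * buffers used when a request's scatterlists cannot be DMA'd directly;
 * dma_in/dma_out are the DMA request lines taken from platform resources,
 * and dma_lch_in/dma_lch_out the channels allocated for them.
 */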
/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
        return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
                                  u32 value)
{
        __raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                                       u32 value, u32 mask)
{
        u32 val;

        val = omap_aes_read(dd, offset);
        val &= ~mask;
        val |= value;
        omap_aes_write(dd, offset, val);
}

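/*
 * For example, to set only the DMA-out enable bit of AES_REG_MASK while
 * leaving the rest of the register untouched:
 *
 *      omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_DMA_OUT_EN,
 *                          AES_REG_MASK_DMA_OUT_EN);
 */
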
static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
                             u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                omap_aes_write(dd, offset, *value);
}

static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
{
        unsigned long timeout = jiffies + DEFAULT_TIMEOUT;

        while (!(omap_aes_read(dd, offset) & bit)) {
                if (time_is_before_jiffies(timeout)) {
                        dev_err(dd->dev, "omap-aes timeout\n");
                        return -ETIMEDOUT;
                }
        }
        return 0;
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
        /*
         * The clock is enabled when a request starts and disabled when it
         * finishes. There may be long delays between requests, and the
         * device might go to off mode to save power.
         */
        clk_enable(dd->iclk);

        if (!(dd->flags & FLAGS_INIT)) {
                /* is it necessary to reset before every operation? */
                omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
                                    AES_REG_MASK_SOFTRESET);
                /*
                 * prevent OCP bus error (SRESP) in case an access to the module
                 * is performed while the module is coming out of soft reset
                 */
                __asm__ __volatile__("nop");
                __asm__ __volatile__("nop");

                if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
                                  AES_REG_SYSSTATUS_RESETDONE))
                        return -ETIMEDOUT;

                dd->flags |= FLAGS_INIT;
                dd->err = 0;
        }

        return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
        unsigned int key32;
        int i, err;
        u32 val, mask;

        err = omap_aes_hw_init(dd);
        if (err)
                return err;

        val = 0;
        if (dd->dma_lch_out >= 0)
                val |= AES_REG_MASK_DMA_OUT_EN;
        if (dd->dma_lch_in >= 0)
                val |= AES_REG_MASK_DMA_IN_EN;

        mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

        omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

        key32 = dd->ctx->keylen / sizeof(u32);

        /* it seems a key should always be set even if it has not changed */
        for (i = 0; i < key32; i++) {
                omap_aes_write(dd, AES_REG_KEY(i),
                               __le32_to_cpu(dd->ctx->key[i]));
        }

        if ((dd->flags & FLAGS_CBC) && dd->req->info)
                omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);

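        /*
         * (keylen >> 3) - 1 maps 16/24/32-byte keys to the values 1/2/3,
         * which land in CTRL bits 4:3 (AES_REG_CTRL_KEY_SIZE).
         */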
        val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
        if (dd->flags & FLAGS_CBC)
                val |= AES_REG_CTRL_CBC;
        if (dd->flags & FLAGS_ENCRYPT)
                val |= AES_REG_CTRL_DIRECTION;

        mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
                        AES_REG_CTRL_KEY_SIZE;

        omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);

        /* IN */
        omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
                                 dd->phys_base + AES_REG_DATA, 0, 4);

        omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
        omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);

        /* OUT */
        omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
                                dd->phys_base + AES_REG_DATA, 0, 4);

        omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
        omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);

        return 0;
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
        struct omap_aes_dev *dd = NULL, *tmp;

        spin_lock_bh(&list_lock);
        if (!ctx->dd) {
                list_for_each_entry(tmp, &dev_list, list) {
                        /* FIXME: take the first available aes core */
                        dd = tmp;
                        break;
                }
                ctx->dd = dd;
        } else {
                /* already found before */
                dd = ctx->dd;
        }
        spin_unlock_bh(&list_lock);

        return dd;
}

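/*
 * DMA completion callback, shared by both channels. An error on either
 * channel marks the device for re-initialization and schedules the done
 * tasklet; a normal completion on the input channel is ignored, so only
 * completion of the output channel (all output data drained) finishes
 * the transfer.
 */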
static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
{
        struct omap_aes_dev *dd = data;

        if (ch_status != OMAP_DMA_BLOCK_IRQ) {
                pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
                dd->err = -EIO;
                dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
        } else if (lch == dd->dma_lch_in) {
                return;
        }

        /* dma_lch_out - completed */
        tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
        int err = -ENOMEM;

        dd->dma_lch_out = -1;
        dd->dma_lch_in = -1;

        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
        dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
        dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf_in || !dd->buf_out) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }

        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
                                         DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }

        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
                                          DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }

        err = omap_request_dma(dd->dma_in, "omap-aes-rx",
                               omap_aes_dma_callback, dd, &dd->dma_lch_in);
        if (err) {
                dev_err(dd->dev, "Unable to request DMA channel\n");
                goto err_dma_in;
        }
        err = omap_request_dma(dd->dma_out, "omap-aes-tx",
                               omap_aes_dma_callback, dd, &dd->dma_lch_out);
        if (err) {
                dev_err(dd->dev, "Unable to request DMA channel\n");
                goto err_dma_out;
        }

        return 0;

err_dma_out:
        omap_free_dma(dd->dma_lch_in);
err_dma_in:
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
err_map_out:
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
        free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
        free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
        if (err)
                pr_err("error: %d\n", err);
        return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
        omap_free_dma(dd->dma_lch_out);
        omap_free_dma(dd->dma_lch_in);
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
        free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
        free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
                        unsigned int start, unsigned int nbytes, int out)
{
        struct scatter_walk walk;

        if (!nbytes)
                return;

        scatterwalk_start(&walk, sg);
        scatterwalk_advance(&walk, start);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
}

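/*
 * Copy up to buflen bytes (capped by total) between the scatterlist and a
 * linear buffer; out selects the direction (0: scatterlist to buffer,
 * 1: buffer to scatterlist). *sg and *offset are advanced so the next call
 * resumes where this one stopped. Returns the number of bytes copied.
 */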
static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
                   size_t buflen, size_t total, int out)
{
        unsigned int count, off = 0;

        while (buflen && total) {
                count = min((*sg)->length - *offset, total);
                count = min(count, buflen);

                if (!count)
                        return off;

                /*
                 * buflen and total are aligned to AES_BLOCK_SIZE,
                 * so count should also be aligned
                 */

                sg_copy_buf(buf + off, *sg, *offset, count, out);

                off += count;
                buflen -= count;
                *offset += count;
                total -= count;

                if (*offset == (*sg)->length) {
                        *sg = sg_next(*sg);
                        if (*sg)
                                *offset = 0;
                        else
                                total = 0;
                }
        }

        return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
                              dma_addr_t dma_addr_out, int length)
{
        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct omap_aes_dev *dd = ctx->dd;
        int len32;

        pr_debug("len: %d\n", length);

        dd->dma_size = length;

        if (!(dd->flags & FLAGS_FAST))
                dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                                           DMA_TO_DEVICE);

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        /* IN */
        omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
                                     len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
                                     OMAP_DMA_DST_SYNC);

        omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_addr_in, 0, 0);

        /* OUT */
        omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
                                     len32, 1, OMAP_DMA_SYNC_PACKET,
                                     dd->dma_out, OMAP_DMA_SRC_SYNC);

        omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_addr_out, 0, 0);

        omap_start_dma(dd->dma_lch_in);
        omap_start_dma(dd->dma_lch_out);

        /* start DMA or disable idle mode */
        omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
                            AES_REG_MASK_START);

        return 0;
}

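/*
 * Decide between the "fast" path, which DMAs straight from/to the caller's
 * scatterlists when both src and dst are single, word-aligned entries, and
 * the slow path, which bounces the data through the pre-mapped
 * buf_in/buf_out pages in dd->buflen-sized chunks.
 */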
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
                                        crypto_ablkcipher_reqtfm(dd->req));
        int err, fast = 0, in, out;
        size_t count;
        dma_addr_t addr_in, addr_out;

        pr_debug("total: %zu\n", dd->total);

        if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
                /* check for alignment */
                in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
                out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

                fast = in && out;
        }

        if (fast) {
                count = min(dd->total, sg_dma_len(dd->in_sg));
                count = min(count, sg_dma_len(dd->out_sg));

                if (count != dd->total) {
                        pr_err("request length != buffer length\n");
                        return -EINVAL;
                }

                pr_debug("fast\n");

                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }

                err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                        return -EINVAL;
                }

                addr_in = sg_dma_address(dd->in_sg);
                addr_out = sg_dma_address(dd->out_sg);

                dd->flags |= FLAGS_FAST;

        } else {
                /* use cache buffers */
                count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
                                dd->buflen, dd->total, 0);

                addr_in = dd->dma_addr_in;
                addr_out = dd->dma_addr_out;

                dd->flags &= ~FLAGS_FAST;
        }

        dd->total -= count;

        err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
        if (err) {
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
        }

        return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
        struct ablkcipher_request *req = dd->req;

        pr_debug("err: %d\n", err);

        clk_disable(dd->iclk);
        dd->flags &= ~FLAGS_BUSY;

        req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
        int err = 0;
        size_t count;

        pr_debug("total: %zu\n", dd->total);

        omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

        omap_stop_dma(dd->dma_lch_in);
        omap_stop_dma(dd->dma_lch_out);

        if (dd->flags & FLAGS_FAST) {
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
        } else {
                dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
                                           dd->dma_size, DMA_FROM_DEVICE);

                /* copy data */
                count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
                                dd->buflen, dd->dma_size, 1);
                if (count != dd->dma_size) {
                        err = -EINVAL;
                        pr_err("not all data converted: %zu\n", count);
                }
        }

        return err;
}

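/*
 * Enqueue the request (if any) and, unless an operation is already in
 * flight, pull the next request off the queue, program the hardware and
 * start the first DMA transfer for it. Called from the crypt entry points
 * and from the tasklets.
 */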
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
                                 struct ablkcipher_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct omap_aes_ctx *ctx;
        struct omap_aes_reqctx *rctx;
        unsigned long flags;
        int err, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
        if (dd->flags & FLAGS_BUSY) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                dd->flags |= FLAGS_BUSY;
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);

        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
        dd->in_offset = 0;
        dd->in_sg = req->src;
        dd->out_offset = 0;
        dd->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        dd->ctx = ctx;
        ctx->dd = dd;

        err = omap_aes_write_ctrl(dd);
        if (!err)
                err = omap_aes_crypt_dma_start(dd);
        if (err) {
                /* aes_task will not finish it, so do it here */
                omap_aes_finish_req(dd, err);
                tasklet_schedule(&dd->queue_task);
        }

        return ret; /* return ret, which is enqueue return value */
}

static void omap_aes_done_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
        int err;

        pr_debug("enter\n");

        err = omap_aes_crypt_dma_stop(dd);

        err = dd->err ? : err;

        if (dd->total && !err) {
                err = omap_aes_crypt_dma_start(dd);
                if (!err)
                        return; /* DMA started. Not finishing. */
        }

        omap_aes_finish_req(dd, err);
        omap_aes_handle_queue(dd, NULL);

        pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

        omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct omap_aes_dev *dd;

        pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
                 !!(mode & FLAGS_ENCRYPT),
                 !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                pr_err("request size is not an exact number of AES blocks\n");
                return -EINVAL;
        }

        dd = omap_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        pr_debug("enter, keylen: %d\n", keylen);

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
        pr_debug("enter\n");

        tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

        return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
        pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-omap",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_ecb_encrypt,
                .decrypt        = omap_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-omap",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_cbc_encrypt,
                .decrypt        = omap_aes_cbc_decrypt,
        }
}
};

static int omap_aes_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct omap_aes_dev *dd;
        struct resource *res;
        int err = -ENOMEM, i, j;
        u32 reg;

        dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                goto err_data;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        spin_lock_init(&dd->lock);
        crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

        /* Get the base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "invalid resource type\n");
                err = -ENODEV;
                goto err_res;
        }
        dd->phys_base = res->start;

        /* Get the DMA out channel */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!res)
                dev_info(dev, "no DMA info\n");
        else
                dd->dma_out = res->start;

        /* Get the DMA in channel */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (!res)
                dev_info(dev, "no DMA info\n");
        else
                dd->dma_in = res->start;

        /* Initializing the clock */
        dd->iclk = clk_get(dev, "ick");
        if (IS_ERR(dd->iclk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(dd->iclk);
                goto err_res;
        }

        dd->io_base = ioremap(dd->phys_base, SZ_4K);
        if (!dd->io_base) {
                dev_err(dev, "can't ioremap\n");
                err = -ENOMEM;
                goto err_io;
        }

        clk_enable(dd->iclk);
        reg = omap_aes_read(dd, AES_REG_REV);
        dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
                 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
        clk_disable(dd->iclk);

        tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
        tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

        err = omap_aes_dma_init(dd);
        if (err)
                goto err_dma;

        INIT_LIST_HEAD(&dd->list);
        spin_lock(&list_lock);
        list_add_tail(&dd->list, &dev_list);
        spin_unlock(&list_lock);

        for (i = 0; i < ARRAY_SIZE(algs); i++) {
                pr_debug("i: %d\n", i);
                INIT_LIST_HEAD(&algs[i].cra_list);
                err = crypto_register_alg(&algs[i]);
                if (err)
                        goto err_algs;
        }

        pr_info("probe() done\n");

        return 0;
err_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&algs[j]);
        omap_aes_dma_cleanup(dd);
err_dma:
        tasklet_kill(&dd->done_task);
        tasklet_kill(&dd->queue_task);
        iounmap(dd->io_base);
err_io:
        clk_put(dd->iclk);
err_res:
        kfree(dd);
        dd = NULL;
err_data:
        dev_err(dev, "initialization failed.\n");
        return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
        struct omap_aes_dev *dd = platform_get_drvdata(pdev);
        int i;

        if (!dd)
                return -ENODEV;

        spin_lock(&list_lock);
        list_del(&dd->list);
        spin_unlock(&list_lock);

        for (i = 0; i < ARRAY_SIZE(algs); i++)
                crypto_unregister_alg(&algs[i]);

        tasklet_kill(&dd->done_task);
        tasklet_kill(&dd->queue_task);
        omap_aes_dma_cleanup(dd);
        iounmap(dd->io_base);
        clk_put(dd->iclk);
        kfree(dd);
        dd = NULL;

        return 0;
}

static struct platform_driver omap_aes_driver = {
        .probe  = omap_aes_probe,
        .remove = omap_aes_remove,
        .driver = {
                .name   = "omap-aes",
                .owner  = THIS_MODULE,
        },
};

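/*
 * Refuse to load on anything that is not an OMAP2-class, security-enabled
 * (SEC) device; the AES module is presumably absent elsewhere.
 */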
static int __init omap_aes_mod_init(void)
{
        pr_info("loading %s driver\n", "omap-aes");

        if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) {
                pr_err("Unsupported cpu\n");
                return -ENODEV;
        }

        return platform_driver_register(&omap_aes_driver);
}

static void __exit omap_aes_mod_exit(void)
{
        platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");