Diffstat (limited to 'drivers/crypto/tegra-aes.c')
-rw-r--r--	drivers/crypto/tegra-aes.c	1096
1 files changed, 1096 insertions, 0 deletions
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
new file mode 100644
index 000000000000..422a9766c7c9
--- /dev/null
+++ b/drivers/crypto/tegra-aes.c
@@ -0,0 +1,1096 @@
/*
 * drivers/crypto/tegra-aes.c
 *
 * Driver for NVIDIA Tegra AES hardware engine residing inside the
 * Bit Stream Engine for Video (BSEV) hardware block.
 *
 * The engine is programmed through commands which travel via a command
 * queue residing between the CPU and the BSEV block. The BSEV engine
 * has an internal RAM (VRAM) into which the input plaintext, keys and
 * the IV have to be copied before starting the encrypt/decrypt
 * operation.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

#include <mach/clk.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/internal/rng.h>

#include "tegra-aes.h"

#define FLAGS_MODE_MASK		0x00FF
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_RNG		BIT(3)
#define FLAGS_OFB		BIT(4)
#define FLAGS_NEW_KEY		BIT(5)
#define FLAGS_NEW_IV		BIT(6)
#define FLAGS_INIT		BIT(7)
#define FLAGS_FAST		BIT(8)
/* FLAGS_BUSY is a bit number (for test_bit() and friends), not a mask */
#define FLAGS_BUSY		9

/*
 * Defines the maximum number of bytes the AES engine processes in one
 * go, which takes about 1 msec. The engine spends about 176 cycles per
 * 16 bytes, i.e. 11 cycles per byte. Limiting the CPU's use of the BSE
 * to 1 msec, the number of available AVP/BSE cycles is 216K, in which
 * time AES can process 216K/11 ~= 19KB. Based on this,
 * AES_HW_DMA_BUFFER_SIZE_BYTES is configured to 16KB.
 */
#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000

/*
 * The key table length is 64 bytes
 * (This includes first up to 32 bytes of key + 16 bytes original
 * initial vector and 16 bytes updated initial vector)
 */
#define AES_HW_KEY_TABLE_LENGTH_BYTES 64

/*
 * The memory being used is divided as follows:
 * 1. Key - 32 bytes
 * 2. Original IV - 16 bytes
 * 3. Updated IV - 16 bytes
 * 4. Key schedule - 256 bytes
 *
 * 1+2+3 constitute the hw key table.
 */
#define AES_HW_IV_SIZE 16
#define AES_HW_KEYSCHEDULE_LEN 256
#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
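
/*
 * Resulting layout of the ivkey buffer (derived from the sizes above):
 *
 *   0x000  key (up to 32 bytes)
 *   0x020  original IV (16 bytes)
 *   0x030  updated IV (16 bytes)
 *   0x040  key schedule (256 bytes)
 */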

/* Define commands required for AES operation */
enum {
	CMD_BLKSTARTENGINE = 0x0E,
	CMD_DMASETUP = 0x10,
	CMD_DMACOMPLETE = 0x11,
	CMD_SETTABLE = 0x15,
	CMD_MEMDMAVD = 0x22,
};

/* Define sub-commands */
enum {
	SUBCMD_VRAM_SEL = 0x1,
	SUBCMD_CRYPTO_TABLE_SEL = 0x3,
	SUBCMD_KEY_TABLE_SEL = 0x8,
};

/* memdma_vd command */
#define MEMDMA_DIR_DTOVRAM	0 /* sdram -> vram */
#define MEMDMA_DIR_VTODRAM	1 /* vram -> sdram */
#define MEMDMA_DIR_SHIFT	25
#define MEMDMA_NUM_WORDS_SHIFT	12

/* command queue bit shifts */
enum {
	CMDQ_KEYTABLEADDR_SHIFT = 0,
	CMDQ_KEYTABLEID_SHIFT = 17,
	CMDQ_VRAMSEL_SHIFT = 23,
	CMDQ_TABLESEL_SHIFT = 24,
	CMDQ_OPCODE_SHIFT = 26,
};
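
/*
 * For illustration, the MEMDMA_VD command word that aes_set_key() below
 * uses to copy the 64-byte key table (16 words) from SDRAM to VRAM is
 * composed as:
 *
 *   CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |          opcode in bits 31:26
 *   MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |     direction in bit 25
 *   16 << MEMDMA_NUM_WORDS_SHIFT                 word count from bit 12
 *
 * with the source physical address written as the following queue entry.
 */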

/*
 * The secure key slot contains a unique secure key generated
 * and loaded by the bootloader. This slot is marked as non-accessible
 * to the kernel.
 */
#define SSK_SLOT_NUM		4

#define AES_NR_KEYSLOTS		8
#define TEGRA_AES_QUEUE_LENGTH	50
#define DEFAULT_RNG_BLK_SZ	16

/* The command queue depth */
#define AES_HW_MAX_ICQ_LENGTH	5

struct tegra_aes_slot {
	struct list_head node;
	int slot_num;
};

static struct tegra_aes_slot ssk = {
	.slot_num = SSK_SLOT_NUM,
};

struct tegra_aes_reqctx {
	unsigned long mode;
};

struct tegra_aes_dev {
	struct device *dev;
	void __iomem *io_base;
	dma_addr_t ivkey_phys_base;
	void *ivkey_base;
	struct clk *aes_clk;
	struct tegra_aes_ctx *ctx;
	int irq;
	unsigned long flags;
	struct completion op_complete;
	u32 *buf_in;
	dma_addr_t dma_buf_in;
	u32 *buf_out;
	dma_addr_t dma_buf_out;
	u8 *iv;
	u8 dt[DEFAULT_RNG_BLK_SZ];
	int ivlen;
	u64 ctr;
	spinlock_t lock;
	struct crypto_queue queue;
	struct tegra_aes_slot *slots;
	struct ablkcipher_request *req;
	size_t total;
	struct scatterlist *in_sg;
	size_t in_offset;
	struct scatterlist *out_sg;
	size_t out_offset;
};
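
/*
 * buf_in/buf_out above are coherent DMA bounce buffers; in this driver
 * they carry single 16-byte blocks (IV, RNG seed, DT) to and from the
 * engine, while bulk request data is DMA-mapped directly from the
 * request's scatterlists.
 */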

static struct tegra_aes_dev *aes_dev;

struct tegra_aes_ctx {
	struct tegra_aes_dev *dd;
	unsigned long flags;
	struct tegra_aes_slot *slot;
	u8 key[AES_MAX_KEY_SIZE];
	size_t keylen;
};

static struct tegra_aes_ctx rng_ctx = {
	.flags = FLAGS_NEW_KEY,
	.keylen = AES_KEYSIZE_128,
};

/* keep registered devices data here */
static struct list_head dev_list;
static DEFINE_SPINLOCK(list_lock);
static DEFINE_MUTEX(aes_lock);

static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;

extern unsigned long long tegra_chip_uid(void);

static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
	return readl(dd->io_base + offset);
}

static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
{
	writel(val, dd->io_base + offset);
}

static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
			   int nblocks, int mode, bool upd_iv)
{
	u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
	int i, eng_busy, icq_empty, ret;
	u32 value;

	/* reset all the interrupt bits */
	aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS);

	/* enable error, dma xfer complete interrupts */
	aes_writel(dd, 0x33, TEGRA_AES_INT_ENB);

	cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT;
	cmdq[1] = in_addr;
	cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks-1);
	cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT;

	value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL);
	/* access SDRAM through AHB */
	value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD;
	value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD;
	value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD;
	aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL);
	dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);

	value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) |
		((dd->ctx->keylen * 8) <<
			TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) |
		((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT);

	if (mode & FLAGS_CBC) {
		value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_XOR_POS_SHIFT) |
			(((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) |
			((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	} else if (mode & FLAGS_OFB) {
		value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) |
			(2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) |
			(TEGRA_AES_SECURE_CORE_SEL_FIELD));
	} else if (mode & FLAGS_RNG) {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT |
			TEGRA_AES_SECURE_RNG_ENB_FIELD);
	} else {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	}
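
	/*
	 * A reading of the selects above (assuming XOR position/VCTRAM
	 * value 2 means the input side and 3 the output side of the AES
	 * core, as CBC semantics imply): CBC encryption XORs the IV or
	 * previous ciphertext into the plaintext before the core, CBC
	 * decryption XORs it into the core output, and core select 1/0
	 * runs the core in the encrypt/decrypt direction.
	 */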

	dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
	aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);

	aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
	INIT_COMPLETION(dd->op_complete);

	/*
	 * Only four command words are populated in cmdq[]; queue the DMA
	 * setup and engine start commands here, and issue the final
	 * DMACOMPLETE only once the operation has finished, so that no
	 * uninitialized cmdq entry is ever written to the queue.
	 */
	for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 2; i++) {
		do {
			value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
			eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
			icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		} while (eng_busy && !icq_empty);
		aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
	}

	ret = wait_for_completion_timeout(&dd->op_complete,
					  msecs_to_jiffies(150));
	if (ret == 0) {
		dev_err(dd->dev, "timed out (0x%x)\n",
			aes_readl(dd, TEGRA_AES_INTR_STATUS));
		return -ETIMEDOUT;
	}

	aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 2], TEGRA_AES_ICMDQUE_WR);
	return 0;
}

static void aes_release_key_slot(struct tegra_aes_slot *slot)
{
	if (slot->slot_num == SSK_SLOT_NUM)
		return;

	spin_lock(&list_lock);
	list_add_tail(&slot->node, &dev_list);
	spin_unlock(&list_lock);
}

static struct tegra_aes_slot *aes_find_key_slot(void)
{
	struct tegra_aes_slot *slot = NULL;

	spin_lock(&list_lock);
	if (!list_empty(&dev_list)) {
		slot = list_first_entry(&dev_list, struct tegra_aes_slot,
					node);
		list_del(&slot->node);
	}
	spin_unlock(&list_lock);

	return slot;
}

static int aes_set_key(struct tegra_aes_dev *dd)
{
	u32 value, cmdq[2];
	struct tegra_aes_ctx *ctx = dd->ctx;
	int eng_busy, icq_empty, dma_busy;
	bool use_ssk = false;

	/* use ssk? */
	if (!dd->ctx->slot) {
		dev_dbg(dd->dev, "using ssk");
		dd->ctx->slot = &ssk;
		use_ssk = true;
	}

	/* enable key schedule generation in hardware */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT);
	value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD;
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT);

	/* select the key slot */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG);
	value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
	value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT);
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG);

	if (use_ssk)
		return 0;

	/* copy the key table from sdram to vram */
	cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
		MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
		AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) <<
			MEMDMA_NUM_WORDS_SHIFT;
	cmdq[1] = (u32)dd->ivkey_phys_base;

	aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR);
	aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
	} while (eng_busy && !icq_empty && dma_busy);

	/* settable command to get key into internal registers */
	value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
		SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
		SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
		(SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) <<
			CMDQ_KEYTABLEID_SHIFT;
	aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
	} while (eng_busy && !icq_empty);

	return 0;
}

static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct tegra_aes_ctx *ctx;
	struct tegra_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;
	int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
	int ret = 0, nblocks, total;
	int count = 0;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;

	if (!dd)
		return -EINVAL;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return -ENODATA;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	dev_dbg(dd->dev, "%s: get new req\n", __func__);

	if (!req->src || !req->dst)
		return -EINVAL;

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	in_sg = dd->in_sg;
	out_sg = dd->out_sg;

	total = dd->total;

	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->iv = (u8 *)req->info;
	dd->ivlen = crypto_ablkcipher_ivsize(tfm);

	/* assign new context to device */
	ctx->dd = dd;
	dd->ctx = ctx;

	if (ctx->flags & FLAGS_NEW_KEY) {
		/* copy the key */
		memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
		memset(dd->ivkey_base + ctx->keylen, 0,
		       AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen);
		aes_set_key(dd);
		ctx->flags &= ~FLAGS_NEW_KEY;
	}

	if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
		/*
		 * Set the IV in the AES hw slot.
		 * Hw generates an updated IV only after the IV is set in
		 * the slot, so the key and IV are passed asynchronously.
		 */
		memcpy(dd->buf_in, dd->iv, dd->ivlen);

		ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
				      (u32)dd->dma_buf_out, 1, FLAGS_CBC,
				      false);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
	}

	while (total) {
		dev_dbg(dd->dev, "remain: %d\n", total);
		ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			ret = -EINVAL;
			goto out;
		}

		ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg,
				     1, DMA_TO_DEVICE);
			ret = -EINVAL;
			goto out;
		}

		addr_in = sg_dma_address(in_sg);
		addr_out = sg_dma_address(out_sg);
		dd->flags |= FLAGS_FAST;
		count = min_t(int, sg_dma_len(in_sg), dma_max);
		WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
		nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);

		ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
				      dd->flags, true);

		dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);

		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
		dd->flags &= ~FLAGS_FAST;

		dev_dbg(dd->dev, "out: copied %d\n", count);
		total -= count;
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
		WARN_ON(((total != 0) && (!in_sg || !out_sg)));
	}

out:
	mutex_unlock(&aes_lock);

	dd->total = total;

	if (dd->req->base.complete)
		dd->req->base.complete(&dd->req->base, ret);

	dev_dbg(dd->dev, "%s: exit\n", __func__);
	return ret;
}

static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_slot *key_slot;

	if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
	    (keylen != AES_KEYSIZE_256)) {
		dev_err(dd->dev, "unsupported key size\n");
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	dev_dbg(dd->dev, "keylen: %d\n", keylen);

	ctx->dd = dd;

	if (key) {
		if (!ctx->slot) {
			key_slot = aes_find_key_slot();
			if (!key_slot) {
				dev_err(dd->dev, "no empty slot\n");
				return -ENOMEM;
			}

			ctx->slot = key_slot;
		}

		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	}

	ctx->flags |= FLAGS_NEW_KEY;
	dev_dbg(dd->dev, "done\n");
	return 0;
}

static void aes_workqueue_handler(struct work_struct *work)
{
	struct tegra_aes_dev *dd = aes_dev;
	int ret;

	ret = clk_enable(dd->aes_clk);
	BUG_ON(ret);

	/* empty the crypto queue and then return */
	do {
		ret = tegra_aes_handle_req(dd);
	} while (!ret);

	clk_disable(dd->aes_clk);
}

static irqreturn_t aes_irq(int irq, void *dev_id)
{
	struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
	u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
	int busy = test_bit(FLAGS_BUSY, &dd->flags);

	if (!busy) {
		dev_dbg(dd->dev, "spurious interrupt\n");
		return IRQ_NONE;
	}

	dev_dbg(dd->dev, "irq_stat: 0x%x\n", value);
	if (value & TEGRA_AES_INT_ERROR_MASK)
		aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS);

	if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD))
		complete(&dd->op_complete);
	else
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct tegra_aes_dev *dd = aes_dev;
	unsigned long flags;
	int err = 0;
	int busy;

	dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT),
		!!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB));

	rctx->mode = mode;

	spin_lock_irqsave(&dd->lock, flags);
	err = ablkcipher_enqueue_request(&dd->queue, req);
	busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!busy)
		queue_work(aes_wq, &aes_work);

	return err;
}

static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT);
}

static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, 0);
}

static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_CBC);
}

static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
}

static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_OFB);
}

static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
				unsigned int dlen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	int ret, i;
	u8 *dest = rdata, *dt = dd->dt;

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	ret = clk_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);
		return ret;
	}

	ctx->dd = dd;
	dd->ctx = ctx;
	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);

	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      (u32)dd->dma_buf_out, 1, dd->flags, true);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		goto out;
	}
	memcpy(dest, dd->buf_out, dlen);
	ret = dlen;

	/*
	 * Update the DT: dt[] is treated as a 128-bit big-endian counter,
	 * matching the date/time-vector update of an ANSI X9.31-style AES
	 * CPRNG (the interface the "ansi_cprng" registration advertises).
	 */
	for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
		dt[i] += 1;
		if (dt[i] != 0)
			break;
	}

out:
	clk_disable(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return ret;
}

static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
			       unsigned int slen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	struct tegra_aes_slot *key_slot;
	struct timespec ts;
	int ret = 0;
	u64 nsec, tmp[2];
	u8 *dt;

	if (!ctx || !dd) {
		pr_err("%s: ctx=%p, dd=%p\n", __func__, ctx, dd);
		return -EINVAL;
	}

	if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dev_err(dd->dev, "seed size invalid");
		return -EINVAL;
	}
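
	/*
	 * The seed is consumed as: bytes 0-15 the initial V block, bytes
	 * 16-31 the AES-128 key, and (when slen allows) bytes 32-47 the
	 * initial DT block, as the offsets below show.
	 */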

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	if (!ctx->slot) {
		key_slot = aes_find_key_slot();
		if (!key_slot) {
			dev_err(dd->dev, "no empty slot\n");
			mutex_unlock(&aes_lock);
			return -ENOMEM;
		}
		ctx->slot = key_slot;
	}

	ctx->dd = dd;
	dd->ctx = ctx;
	dd->ctr = 0;

	ctx->keylen = AES_KEYSIZE_128;
	ctx->flags |= FLAGS_NEW_KEY;

	/* copy the key to the key slot */
	memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
	memset(dd->ivkey_base + AES_KEYSIZE_128, 0,
	       AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128);

	dd->iv = seed;
	dd->ivlen = slen;

	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	ret = clk_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);
		return ret;
	}

	aes_set_key(dd);

	/* set seed to the aes hw slot */
	memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      (u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		goto out;
	}

	if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
	} else {
		getnstimeofday(&ts);
		nsec = timespec_to_ns(&ts);
		do_div(nsec, 1000);
		nsec ^= dd->ctr << 56;
		dd->ctr++;
		tmp[0] = nsec;
		tmp[1] = tegra_chip_uid();
		dt = (u8 *)tmp;
	}
	memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);

out:
	clk_disable(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return ret;
}

static int tegra_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);

	return 0;
}

static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_aes_ctx *ctx =
		crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);

	if (ctx && ctx->slot)
		aes_release_key_slot(ctx->slot);
}

static struct crypto_alg algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_alignmask = 3,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ecb_encrypt,
			.decrypt = tegra_aes_ecb_decrypt,
		},
	}, {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_alignmask = 3,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_cbc_encrypt,
			.decrypt = tegra_aes_cbc_decrypt,
		}
	}, {
		.cra_name = "ofb(aes)",
		.cra_driver_name = "ofb-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_alignmask = 3,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ofb_encrypt,
			.decrypt = tegra_aes_ofb_decrypt,
		}
	}, {
		.cra_name = "ansi_cprng",
		.cra_driver_name = "rng-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_RNG,
		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
		.cra_type = &crypto_rng_type,
		.cra_u.rng = {
			.rng_make_random = tegra_aes_get_random,
			.rng_reset = tegra_aes_rng_reset,
			.seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
		}
	}
};
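
/*
 * The "ansi_cprng" entry above exposes the engine's RNG mode through the
 * ANSI X9.31-style CPRNG interface: the 48-byte seed carries V, key and
 * DT (see tegra_aes_rng_reset()), and each tegra_aes_get_random() call
 * runs the engine with TEGRA_AES_SECURE_RNG_ENB_FIELD set on the current
 * DT block and then increments DT.
 */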

static int tegra_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd;
	struct resource *res;
	int err = -ENOMEM, i = 0, j;

	dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		return err;
	}

	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) *
				 AES_NR_KEYSLOTS, GFP_KERNEL);
	if (dd->slots == NULL) {
		dev_err(dev, "unable to alloc slot struct.\n");
		goto out;
	}

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);

	/* Get the module base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: base\n");
		err = -ENODEV;
		goto out;
	}

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res),
				     dev_name(&pdev->dev))) {
		dev_err(&pdev->dev, "Couldn't request MEM resource\n");
		return -ENODEV;
	}

	dd->io_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap register space\n");
		err = -ENOMEM;
		goto out;
	}

	/* Initialize the vde clock */
	dd->aes_clk = clk_get(dev, "vde");
	if (IS_ERR(dd->aes_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = -ENODEV;
		goto out;
	}

	err = clk_set_rate(dd->aes_clk, ULONG_MAX);
	if (err) {
		dev_err(dd->dev, "clk set_rate fail(%d)\n", err);
		goto out;
	}

	/*
	 * The following contiguous memory is allocated:
	 * - hardware key table
	 * - key schedule
	 */
	dd->ivkey_base = dma_alloc_coherent(dev, AES_IVKEY_SIZE,
					    &dd->ivkey_phys_base,
					    GFP_KERNEL);
	if (!dd->ivkey_base) {
		dev_err(dev, "can not allocate iv/key buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					&dd->dma_buf_in, GFP_KERNEL);
	if (!dd->buf_in) {
		dev_err(dev, "can not allocate dma-in buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					 &dd->dma_buf_out, GFP_KERNEL);
	if (!dd->buf_out) {
		dev_err(dev, "can not allocate dma-out buffer\n");
		err = -ENOMEM;
		goto out;
	}

	init_completion(&dd->op_complete);
	aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!aes_wq) {
		dev_err(dev, "alloc_workqueue failed\n");
		err = -ENOMEM;
		goto out;
	}

	/* get the irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: irq\n");
		err = -ENODEV;
		goto out;
	}
	dd->irq = res->start;

	err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH |
			       IRQF_SHARED, "tegra-aes", dd);
	if (err) {
		dev_err(dev, "request_irq failed\n");
		goto out;
	}

	mutex_init(&aes_lock);
	INIT_LIST_HEAD(&dev_list);

	spin_lock_init(&list_lock);
	spin_lock(&list_lock);
	for (i = 0; i < AES_NR_KEYSLOTS; i++) {
		if (i == SSK_SLOT_NUM)
			continue;
		dd->slots[i].slot_num = i;
		INIT_LIST_HEAD(&dd->slots[i].node);
		list_add_tail(&dd->slots[i].node, &dev_list);
	}
	spin_unlock(&list_lock);

	aes_dev = dd;
	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		INIT_LIST_HEAD(&algs[i].cra_list);

		algs[i].cra_priority = 300;
		algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
		algs[i].cra_module = THIS_MODULE;
		algs[i].cra_init = tegra_aes_cra_init;
		algs[i].cra_exit = tegra_aes_cra_exit;

		err = crypto_register_alg(&algs[i]);
		if (err)
			goto out;
	}

	dev_info(dev, "registered");
	return 0;

out:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	if (dd->ivkey_base)
		dma_free_coherent(dev, AES_IVKEY_SIZE,
				  dd->ivkey_base, dd->ivkey_phys_base);
	if (dd->buf_in)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_in, dd->dma_buf_in);
	if (dd->buf_out)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_out, dd->dma_buf_out);
	if (!IS_ERR_OR_NULL(dd->aes_clk))
		clk_put(dd->aes_clk);
	if (aes_wq)
		destroy_workqueue(aes_wq);

	aes_dev = NULL;

	dev_err(dev, "%s: initialization failed.\n", __func__);
	return err;
}

static int __devexit tegra_aes_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	cancel_work_sync(&aes_work);
	destroy_workqueue(aes_wq);

	dma_free_coherent(dev, AES_IVKEY_SIZE,
			  dd->ivkey_base, dd->ivkey_phys_base);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_in, dd->dma_buf_in);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_out, dd->dma_buf_out);
	clk_put(dd->aes_clk);
	aes_dev = NULL;

	return 0;
}

static struct of_device_id tegra_aes_of_match[] __devinitdata = {
	{ .compatible = "nvidia,tegra20-aes", },
	{ .compatible = "nvidia,tegra30-aes", },
	{ },
};

static struct platform_driver tegra_aes_driver = {
	.probe  = tegra_aes_probe,
	.remove = __devexit_p(tegra_aes_remove),
	.driver = {
		.name  = "tegra-aes",
		.owner = THIS_MODULE,
		.of_match_table = tegra_aes_of_match,
	},
};

module_platform_driver(tegra_aes_driver);

MODULE_DESCRIPTION("Tegra AES/OFB/CPRNG hw acceleration support.");
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_LICENSE("GPL v2");
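
For context, a minimal sketch of how a kernel caller of this era reaches the driver through the ablkcipher API; the helper name is hypothetical and error handling is abbreviated, and since the Tegra implementation is asynchronous a real caller must set a completion callback and wait on -EINPROGRESS/-EBUSY rather than treat the return as final:

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_cbc_encrypt(u8 *buf, unsigned int len,
			       const u8 key[AES_KEYSIZE_128],
			       u8 iv[AES_BLOCK_SIZE])
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int ret;

	/* "cbc(aes)" resolves to "cbc-aes-tegra" when its priority wins */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	/* encrypt in place; len must be a multiple of AES_BLOCK_SIZE */
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/*
	 * Synchronous waiting is elided here: a real caller registers a
	 * callback via ablkcipher_request_set_callback() and completes on
	 * -EINPROGRESS/-EBUSY.
	 */
	ret = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out:
	crypto_free_ablkcipher(tfm);
	return ret;
}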