author	Stephen Warren <swarren@nvidia.com>	2014-02-18 16:42:57 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2014-02-26 16:56:57 -0500
commit	645af2e43705d8c80c9606d5c5e83eda9a1a1a2e (patch)
tree	026c965f1498f721ce6e9d5a4d83a1cdc07e26d2
parent	701d0f1940925d40f6b8a40f6c278d14be71402b (diff)
crypto: tegra - remove driver
This driver has never been hooked up in any board file, and cannot be
instantiated via device tree. I've been told that, at least on Tegra20,
the HW is slower at crypto than the main CPU. I have no test-case for it.
Hence, remove it.

Cc: Varun Wadekar <vwadekar@nvidia.com>
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--	drivers/crypto/Kconfig		|   11 -
-rw-r--r--	drivers/crypto/Makefile		|    1 -
-rw-r--r--	drivers/crypto/tegra-aes.c	| 1087 -----
-rw-r--r--	drivers/crypto/tegra-aes.h	|  103 --
4 files changed, 0 insertions, 1202 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 8e38000990d7..03ccdb0ccf9e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -311,17 +311,6 @@ config CRYPTO_DEV_S5P
 	  Select this to offload Samsung S5PV210 or S5PC110 from AES
 	  algorithms execution.
 
-config CRYPTO_DEV_TEGRA_AES
-	tristate "Support for TEGRA AES hw engine"
-	depends on ARCH_TEGRA
-	select CRYPTO_AES
-	help
-	  TEGRA processors have AES module accelerator. Select this if you
-	  want to use the TEGRA module for AES algorithms.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called tegra-aes.
-
 config CRYPTO_DEV_NX
 	bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
 	depends on PPC64 && IBMVIO
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9a53fb8ceff9..482f090d16d0 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -22,5 +22,4 @@ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
-obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
deleted file mode 100644
index 060eecc5dbc3..000000000000
--- a/drivers/crypto/tegra-aes.c
+++ /dev/null
@@ -1,1087 +0,0 @@
-/*
- * drivers/crypto/tegra-aes.c
- *
- * Driver for the NVIDIA Tegra AES hardware engine residing inside the
- * Bit Stream Engine for Video (BSEV) hardware block.
- *
- * The engine is programmed with the help of commands which travel via
- * a command queue residing between the CPU and the BSEV block. The
- * BSEV engine has an internal RAM (VRAM) where the final input
- * plaintext, keys and the IV have to be copied before starting the
- * encrypt/decrypt operation.
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-
-#include <crypto/scatterwalk.h>
-#include <crypto/aes.h>
-#include <crypto/internal/rng.h>
-
-#include "tegra-aes.h"
-
-#define FLAGS_MODE_MASK		0x00FF
-#define FLAGS_ENCRYPT		BIT(0)
-#define FLAGS_CBC		BIT(1)
-#define FLAGS_GIV		BIT(2)
-#define FLAGS_RNG		BIT(3)
-#define FLAGS_OFB		BIT(4)
-#define FLAGS_NEW_KEY		BIT(5)
-#define FLAGS_NEW_IV		BIT(6)
-#define FLAGS_INIT		BIT(7)
-#define FLAGS_FAST		BIT(8)
-#define FLAGS_BUSY		9
-
-/*
- * Defines the AES engine's maximum process size in bytes for one go,
- * which takes about 1 msec. The AES engine spends about 176 cycles per
- * 16-byte block, i.e. 11 cycles/byte. The CPU may use the BSE for up to
- * 1 msec, during which about 216K AVP/BSE cycles are available; in that
- * time AES can process 216K/11 ~= 19KB. Based on this,
- * AES_HW_DMA_BUFFER_SIZE_BYTES is configured to 16KB.
- */
-#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000
-/*
- * The key table length is 64 bytes
- * (this includes the first key of up to 32 bytes, the 16-byte original
- * initial vector and the 16-byte updated initial vector).
- */
-#define AES_HW_KEY_TABLE_LENGTH_BYTES 64
-
-/*
- * The memory being used is divided as follows:
- * 1. Key - 32 bytes
- * 2. Original IV - 16 bytes
- * 3. Updated IV - 16 bytes
- * 4. Key schedule - 256 bytes
- *
- * 1+2+3 constitute the hw key table.
- */
-#define AES_HW_IV_SIZE 16
-#define AES_HW_KEYSCHEDULE_LEN 256
-#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
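-/*
- * Byte offsets implied by the constants above: [0..31] key,
- * [32..47] original IV, [48..63] updated IV (end of the 64-byte hw
- * key table), [64..319] key schedule, so AES_IVKEY_SIZE = 64 + 256
- * = 320 bytes.
- */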
-
-/* Define commands required for AES operation */
-enum {
-	CMD_BLKSTARTENGINE = 0x0E,
-	CMD_DMASETUP = 0x10,
-	CMD_DMACOMPLETE = 0x11,
-	CMD_SETTABLE = 0x15,
-	CMD_MEMDMAVD = 0x22,
-};
-
-/* Define sub-commands */
-enum {
-	SUBCMD_VRAM_SEL = 0x1,
-	SUBCMD_CRYPTO_TABLE_SEL = 0x3,
-	SUBCMD_KEY_TABLE_SEL = 0x8,
-};
-
-/* memdma_vd command */
-#define MEMDMA_DIR_DTOVRAM	0 /* sdram -> vram */
-#define MEMDMA_DIR_VTODRAM	1 /* vram -> sdram */
-#define MEMDMA_DIR_SHIFT	25
-#define MEMDMA_NUM_WORDS_SHIFT	12
-
-/* command queue bit shifts */
-enum {
-	CMDQ_KEYTABLEADDR_SHIFT = 0,
-	CMDQ_KEYTABLEID_SHIFT = 17,
-	CMDQ_VRAMSEL_SHIFT = 23,
-	CMDQ_TABLESEL_SHIFT = 24,
-	CMDQ_OPCODE_SHIFT = 26,
-};
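-/*
- * As implied by the shifts above, a 32-bit ICQ command word packs,
- * from the top: opcode [31:26], table select [25:24], vram select
- * [23], key table id starting at bit 17 and key table address in the
- * low bits; aes_set_key() below assembles its SETTABLE word this way.
- */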
-
-/*
- * The secure key slot contains a unique secure key generated
- * and loaded by the bootloader. This slot is marked as non-accessible
- * to the kernel.
- */
-#define SSK_SLOT_NUM	4
-
-#define AES_NR_KEYSLOTS	8
-#define TEGRA_AES_QUEUE_LENGTH	50
-#define DEFAULT_RNG_BLK_SZ	16
-
-/* The command queue depth */
-#define AES_HW_MAX_ICQ_LENGTH	5
-
-struct tegra_aes_slot {
-	struct list_head node;
-	int slot_num;
-};
-
-static struct tegra_aes_slot ssk = {
-	.slot_num = SSK_SLOT_NUM,
-};
-
-struct tegra_aes_reqctx {
-	unsigned long mode;
-};
-
-struct tegra_aes_dev {
-	struct device *dev;
-	void __iomem *io_base;
-	dma_addr_t ivkey_phys_base;
-	void __iomem *ivkey_base;
-	struct clk *aes_clk;
-	struct tegra_aes_ctx *ctx;
-	int irq;
-	unsigned long flags;
-	struct completion op_complete;
-	u32 *buf_in;
-	dma_addr_t dma_buf_in;
-	u32 *buf_out;
-	dma_addr_t dma_buf_out;
-	u8 *iv;
-	u8 dt[DEFAULT_RNG_BLK_SZ];
-	int ivlen;
-	u64 ctr;
-	spinlock_t lock;
-	struct crypto_queue queue;
-	struct tegra_aes_slot *slots;
-	struct ablkcipher_request *req;
-	size_t total;
-	struct scatterlist *in_sg;
-	size_t in_offset;
-	struct scatterlist *out_sg;
-	size_t out_offset;
-};
-
-static struct tegra_aes_dev *aes_dev;
-
-struct tegra_aes_ctx {
-	struct tegra_aes_dev *dd;
-	unsigned long flags;
-	struct tegra_aes_slot *slot;
-	u8 key[AES_MAX_KEY_SIZE];
-	size_t keylen;
-};
-
-static struct tegra_aes_ctx rng_ctx = {
-	.flags = FLAGS_NEW_KEY,
-	.keylen = AES_KEYSIZE_128,
-};
-
-/* keep registered devices data here */
-static struct list_head dev_list;
-static DEFINE_SPINLOCK(list_lock);
-static DEFINE_MUTEX(aes_lock);
-
-static void aes_workqueue_handler(struct work_struct *work);
-static DECLARE_WORK(aes_work, aes_workqueue_handler);
-static struct workqueue_struct *aes_wq;
-
-static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
-{
-	return readl(dd->io_base + offset);
-}
-
-static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
-{
-	writel(val, dd->io_base + offset);
-}
-
-static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
-			   int nblocks, int mode, bool upd_iv)
-{
-	u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
-	int i, eng_busy, icq_empty, ret;
-	u32 value;
-
-	/* reset all the interrupt bits */
-	aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS);
-
-	/* enable error, dma xfer complete interrupts */
-	aes_writel(dd, 0x33, TEGRA_AES_INT_ENB);
-
-	cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT;
-	cmdq[1] = in_addr;
-	cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks-1);
-	cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT;
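-	/*
-	 * The four words above form the ICQ sequence: DMA setup, the
-	 * source address, engine start (block count encoded as
-	 * nblocks - 1) and DMA complete, each opcode placed at
-	 * CMDQ_OPCODE_SHIFT.
-	 */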
-
-	value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL);
-	/* access SDRAM through AHB */
-	value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD;
-	value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD;
-	value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD |
-		 TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD |
-		 TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD;
-	aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL);
-	dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);
-
-	value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) |
-		((dd->ctx->keylen * 8) <<
-			TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) |
-		((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT);
-
-	if (mode & FLAGS_CBC) {
-		value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3)
-				<< TEGRA_AES_SECURE_XOR_POS_SHIFT) |
-			(((mode & FLAGS_ENCRYPT) ? 2 : 3)
-				<< TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) |
-			((mode & FLAGS_ENCRYPT) ? 1 : 0)
-				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
-	} else if (mode & FLAGS_OFB) {
-		value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) |
-			(2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) |
-			(TEGRA_AES_SECURE_CORE_SEL_FIELD));
-	} else if (mode & FLAGS_RNG) {
-		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
-				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT |
-			TEGRA_AES_SECURE_RNG_ENB_FIELD);
-	} else {
-		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
-				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
-	}
-
-	dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
-	aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);
-
-	aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
-	reinit_completion(&dd->op_complete);
-
-	for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
-		do {
-			value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
-			eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
-			icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
-		} while (eng_busy && !icq_empty);
-		aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
-	}
-
-	ret = wait_for_completion_timeout(&dd->op_complete,
-					  msecs_to_jiffies(150));
-	if (ret == 0) {
-		dev_err(dd->dev, "timed out (0x%x)\n",
-			aes_readl(dd, TEGRA_AES_INTR_STATUS));
-		return -ETIMEDOUT;
-	}
-
-	aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 1], TEGRA_AES_ICMDQUE_WR);
-	return 0;
-}
-
-static void aes_release_key_slot(struct tegra_aes_slot *slot)
-{
-	if (slot->slot_num == SSK_SLOT_NUM)
-		return;
-
-	spin_lock(&list_lock);
-	list_add_tail(&slot->node, &dev_list);
-	slot = NULL;
-	spin_unlock(&list_lock);
-}
-
-static struct tegra_aes_slot *aes_find_key_slot(void)
-{
-	struct tegra_aes_slot *slot = NULL;
-	struct list_head *new_head;
-	int empty;
-
-	spin_lock(&list_lock);
-	empty = list_empty(&dev_list);
-	if (!empty) {
-		slot = list_entry(&dev_list, struct tegra_aes_slot, node);
-		new_head = dev_list.next;
-		list_del(&dev_list);
-		dev_list.next = new_head->next;
-		dev_list.prev = NULL;
-	}
-	spin_unlock(&list_lock);
-
-	return slot;
-}
-
-static int aes_set_key(struct tegra_aes_dev *dd)
-{
-	u32 value, cmdq[2];
-	struct tegra_aes_ctx *ctx = dd->ctx;
-	int eng_busy, icq_empty, dma_busy;
-	bool use_ssk = false;
-
-	/* use ssk? */
-	if (!dd->ctx->slot) {
-		dev_dbg(dd->dev, "using ssk");
-		dd->ctx->slot = &ssk;
-		use_ssk = true;
-	}
-
-	/* enable key schedule generation in hardware */
-	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT);
-	value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD;
-	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT);
-
-	/* select the key slot */
-	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG);
-	value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
-	value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT);
-	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG);
-
-	if (use_ssk)
-		return 0;
-
-	/* copy the key table from sdram to vram */
-	cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
-		MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
-		AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) <<
-			MEMDMA_NUM_WORDS_SHIFT;
-	cmdq[1] = (u32)dd->ivkey_phys_base;
-
-	aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR);
-	aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR);
-
-	do {
-		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
-		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
-		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
-		dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
-	} while (eng_busy && !icq_empty && dma_busy);
-
-	/* settable command to get key into internal registers */
-	value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
-		SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
-		SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
-		(SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) <<
-			CMDQ_KEYTABLEID_SHIFT;
-	aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR);
-
-	do {
-		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
-		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
-		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
-	} while (eng_busy && !icq_empty);
-
-	return 0;
-}
-
-static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
-{
-	struct crypto_async_request *async_req, *backlog;
-	struct crypto_ablkcipher *tfm;
-	struct tegra_aes_ctx *ctx;
-	struct tegra_aes_reqctx *rctx;
-	struct ablkcipher_request *req;
-	unsigned long flags;
-	int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
-	int ret = 0, nblocks, total;
-	int count = 0;
-	dma_addr_t addr_in, addr_out;
-	struct scatterlist *in_sg, *out_sg;
-
-	if (!dd)
-		return -EINVAL;
-
-	spin_lock_irqsave(&dd->lock, flags);
-	backlog = crypto_get_backlog(&dd->queue);
-	async_req = crypto_dequeue_request(&dd->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dd->flags);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (!async_req)
-		return -ENODATA;
-
-	if (backlog)
-		backlog->complete(backlog, -EINPROGRESS);
-
-	req = ablkcipher_request_cast(async_req);
-
-	dev_dbg(dd->dev, "%s: get new req\n", __func__);
-
-	if (!req->src || !req->dst)
-		return -EINVAL;
-
-	/* take mutex to access the aes hw */
-	mutex_lock(&aes_lock);
-
-	/* assign new request to device */
-	dd->req = req;
-	dd->total = req->nbytes;
-	dd->in_offset = 0;
-	dd->in_sg = req->src;
-	dd->out_offset = 0;
-	dd->out_sg = req->dst;
-
-	in_sg = dd->in_sg;
-	out_sg = dd->out_sg;
-
-	total = dd->total;
-
-	tfm = crypto_ablkcipher_reqtfm(req);
-	rctx = ablkcipher_request_ctx(req);
-	ctx = crypto_ablkcipher_ctx(tfm);
-	rctx->mode &= FLAGS_MODE_MASK;
-	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
-
-	dd->iv = (u8 *)req->info;
-	dd->ivlen = crypto_ablkcipher_ivsize(tfm);
-
-	/* assign new context to device */
-	ctx->dd = dd;
-	dd->ctx = ctx;
-
-	if (ctx->flags & FLAGS_NEW_KEY) {
-		/* copy the key */
-		memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
-		memset(dd->ivkey_base + ctx->keylen, 0,
-			AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen);
-		aes_set_key(dd);
-		ctx->flags &= ~FLAGS_NEW_KEY;
-	}
-
-	if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
-		/*
-		 * Set the IV in the AES hw slot. The hw generates the
-		 * updated IV only after the IV has been set in the slot,
-		 * so the key and IV are passed asynchronously.
-		 */
-		memcpy(dd->buf_in, dd->iv, dd->ivlen);
-
-		ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
-				      dd->dma_buf_out, 1, FLAGS_CBC, false);
-		if (ret < 0) {
-			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
-			goto out;
-		}
-	}
-
-	while (total) {
-		dev_dbg(dd->dev, "remain: %d\n", total);
-		ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
-		if (!ret) {
-			dev_err(dd->dev, "dma_map_sg() error\n");
-			goto out;
-		}
-
-		ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
-		if (!ret) {
-			dev_err(dd->dev, "dma_map_sg() error\n");
-			dma_unmap_sg(dd->dev, dd->in_sg,
-				1, DMA_TO_DEVICE);
-			goto out;
-		}
-
-		addr_in = sg_dma_address(in_sg);
-		addr_out = sg_dma_address(out_sg);
-		dd->flags |= FLAGS_FAST;
-		count = min_t(int, sg_dma_len(in_sg), dma_max);
-		WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
-		nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);
-
-		ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
-			dd->flags, true);
-
-		dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
-		dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
-
-		if (ret < 0) {
-			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
-			goto out;
-		}
-		dd->flags &= ~FLAGS_FAST;
-
-		dev_dbg(dd->dev, "out: copied %d\n", count);
-		total -= count;
-		in_sg = sg_next(in_sg);
-		out_sg = sg_next(out_sg);
-		WARN_ON(((total != 0) && (!in_sg || !out_sg)));
-	}
-
-out:
-	mutex_unlock(&aes_lock);
-
-	dd->total = total;
-
-	if (dd->req->base.complete)
-		dd->req->base.complete(&dd->req->base, ret);
-
-	dev_dbg(dd->dev, "%s: exit\n", __func__);
-	return ret;
-}
-
-static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
-			    unsigned int keylen)
-{
-	struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	struct tegra_aes_dev *dd = aes_dev;
-	struct tegra_aes_slot *key_slot;
-
-	if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
-	    (keylen != AES_KEYSIZE_256)) {
-		dev_err(dd->dev, "unsupported key size\n");
-		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-
-	dev_dbg(dd->dev, "keylen: %d\n", keylen);
-
-	ctx->dd = dd;
-
-	if (key) {
-		if (!ctx->slot) {
-			key_slot = aes_find_key_slot();
-			if (!key_slot) {
-				dev_err(dd->dev, "no empty slot\n");
-				return -ENOMEM;
-			}
-
-			ctx->slot = key_slot;
-		}
-
-		memcpy(ctx->key, key, keylen);
-		ctx->keylen = keylen;
-	}
-
-	ctx->flags |= FLAGS_NEW_KEY;
-	dev_dbg(dd->dev, "done\n");
-	return 0;
-}
-
-static void aes_workqueue_handler(struct work_struct *work)
-{
-	struct tegra_aes_dev *dd = aes_dev;
-	int ret;
-
-	ret = clk_prepare_enable(dd->aes_clk);
-	if (ret)
-		BUG_ON("clock enable failed");
-
-	/* empty the crypto queue and then return */
-	do {
-		ret = tegra_aes_handle_req(dd);
-	} while (!ret);
-
-	clk_disable_unprepare(dd->aes_clk);
-}
-
-static irqreturn_t aes_irq(int irq, void *dev_id)
-{
-	struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
-	u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
-	int busy = test_bit(FLAGS_BUSY, &dd->flags);
-
-	if (!busy) {
-		dev_dbg(dd->dev, "spurious interrupt\n");
-		return IRQ_NONE;
-	}
-
-	dev_dbg(dd->dev, "irq_stat: 0x%x\n", value);
-	if (value & TEGRA_AES_INT_ERROR_MASK)
-		aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS);
-
-	if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD))
-		complete(&dd->op_complete);
-	else
-		return IRQ_NONE;
-
-	return IRQ_HANDLED;
-}
-
-static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
-{
-	struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
-	struct tegra_aes_dev *dd = aes_dev;
-	unsigned long flags;
-	int err = 0;
-	int busy;
-
-	dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n",
-		req->nbytes, !!(mode & FLAGS_ENCRYPT),
-		!!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB));
-
-	rctx->mode = mode;
-
-	spin_lock_irqsave(&dd->lock, flags);
-	err = ablkcipher_enqueue_request(&dd->queue, req);
-	busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (!busy)
-		queue_work(aes_wq, &aes_work);
-
-	return err;
-}
-
-static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
-{
-	return tegra_aes_crypt(req, FLAGS_ENCRYPT);
-}
-
-static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
-{
-	return tegra_aes_crypt(req, 0);
-}
-
-static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
-{
-	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
-}
-
-static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
-{
-	return tegra_aes_crypt(req, FLAGS_CBC);
-}
-
-static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
-{
-	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
-}
-
-static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
-{
-	return tegra_aes_crypt(req, FLAGS_OFB);
-}
-
-static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
-				unsigned int dlen)
-{
-	struct tegra_aes_dev *dd = aes_dev;
-	struct tegra_aes_ctx *ctx = &rng_ctx;
-	int ret, i;
-	u8 *dest = rdata, *dt = dd->dt;
-
-	/* take mutex to access the aes hw */
-	mutex_lock(&aes_lock);
-
-	ret = clk_prepare_enable(dd->aes_clk);
-	if (ret) {
-		mutex_unlock(&aes_lock);
-		return ret;
-	}
-
-	ctx->dd = dd;
-	dd->ctx = ctx;
-	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
-
-	memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);
-
-	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
-			      (u32)dd->dma_buf_out, 1, dd->flags, true);
-	if (ret < 0) {
-		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
-		dlen = ret;
-		goto out;
-	}
-	memcpy(dest, dd->buf_out, dlen);
-
-	/* update the DT */
-	for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
-		dt[i] += 1;
-		if (dt[i] != 0)
-			break;
-	}
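-	/*
-	 * The loop above treats dt[] as a big-endian 128-bit counter:
-	 * the last byte is incremented first and the carry propagates
-	 * towards dt[0] while bytes wrap to zero.
-	 */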
-
-out:
-	clk_disable_unprepare(dd->aes_clk);
-	mutex_unlock(&aes_lock);
-
-	dev_dbg(dd->dev, "%s: done\n", __func__);
-	return dlen;
-}
-
-static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
-			       unsigned int slen)
-{
-	struct tegra_aes_dev *dd = aes_dev;
-	struct tegra_aes_ctx *ctx = &rng_ctx;
-	struct tegra_aes_slot *key_slot;
-	int ret = 0;
-	u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
-	u8 *dt;
-
-	if (!ctx || !dd) {
-		pr_err("ctx=0x%x, dd=0x%x\n",
-		       (unsigned int)ctx, (unsigned int)dd);
-		return -EINVAL;
-	}
-
-	if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
-		dev_err(dd->dev, "seed size invalid");
-		return -ENOMEM;
-	}
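-	/*
-	 * Seed layout, as implied by the check above and the copies
-	 * below: bytes [0..15] hold the initial V, [16..31] the
-	 * AES-128 key, and optionally [32..47] the DT value; without
-	 * the optional DT the driver falls back to get_random_bytes().
-	 */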
-
-	/* take mutex to access the aes hw */
-	mutex_lock(&aes_lock);
-
-	if (!ctx->slot) {
-		key_slot = aes_find_key_slot();
-		if (!key_slot) {
-			dev_err(dd->dev, "no empty slot\n");
-			mutex_unlock(&aes_lock);
-			return -ENOMEM;
-		}
-		ctx->slot = key_slot;
-	}
-
-	ctx->dd = dd;
-	dd->ctx = ctx;
-	dd->ctr = 0;
-
-	ctx->keylen = AES_KEYSIZE_128;
-	ctx->flags |= FLAGS_NEW_KEY;
-
-	/* copy the key to the key slot */
-	memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
-	memset(dd->ivkey_base + AES_KEYSIZE_128, 0,
-		AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128);
-
-	dd->iv = seed;
-	dd->ivlen = slen;
-
-	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
-
-	ret = clk_prepare_enable(dd->aes_clk);
-	if (ret) {
-		mutex_unlock(&aes_lock);
-		return ret;
-	}
-
-	aes_set_key(dd);
-
-	/* set seed to the aes hw slot */
-	memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
-	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
-			      dd->dma_buf_out, 1, FLAGS_CBC, false);
-	if (ret < 0) {
-		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
-		goto out;
-	}
-
-	if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
-		dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
-	} else {
-		get_random_bytes(tmp, sizeof(tmp));
-		dt = tmp;
-	}
-	memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
-
-out:
-	clk_disable_unprepare(dd->aes_clk);
-	mutex_unlock(&aes_lock);
-
-	dev_dbg(dd->dev, "%s: done\n", __func__);
-	return ret;
-}
-
-static int tegra_aes_cra_init(struct crypto_tfm *tfm)
-{
-	tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);
-
-	return 0;
-}
-
-static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
-{
-	struct tegra_aes_ctx *ctx =
-		crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
-
-	if (ctx && ctx->slot)
-		aes_release_key_slot(ctx->slot);
-}
-
-static struct crypto_alg algs[] = {
-	{
-		.cra_name = "ecb(aes)",
-		.cra_driver_name = "ecb-aes-tegra",
-		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-		.cra_blocksize = AES_BLOCK_SIZE,
-		.cra_alignmask = 3,
-		.cra_type = &crypto_ablkcipher_type,
-		.cra_u.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.setkey = tegra_aes_setkey,
-			.encrypt = tegra_aes_ecb_encrypt,
-			.decrypt = tegra_aes_ecb_decrypt,
-		},
-	}, {
-		.cra_name = "cbc(aes)",
-		.cra_driver_name = "cbc-aes-tegra",
-		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-		.cra_blocksize = AES_BLOCK_SIZE,
-		.cra_alignmask = 3,
-		.cra_type = &crypto_ablkcipher_type,
-		.cra_u.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_MIN_KEY_SIZE,
-			.setkey = tegra_aes_setkey,
-			.encrypt = tegra_aes_cbc_encrypt,
-			.decrypt = tegra_aes_cbc_decrypt,
-		}
-	}, {
-		.cra_name = "ofb(aes)",
-		.cra_driver_name = "ofb-aes-tegra",
-		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-		.cra_blocksize = AES_BLOCK_SIZE,
-		.cra_alignmask = 3,
-		.cra_type = &crypto_ablkcipher_type,
-		.cra_u.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_MIN_KEY_SIZE,
-			.setkey = tegra_aes_setkey,
-			.encrypt = tegra_aes_ofb_encrypt,
-			.decrypt = tegra_aes_ofb_decrypt,
-		}
-	}, {
-		.cra_name = "ansi_cprng",
-		.cra_driver_name = "rng-aes-tegra",
-		.cra_flags = CRYPTO_ALG_TYPE_RNG,
-		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
-		.cra_type = &crypto_rng_type,
-		.cra_u.rng = {
-			.rng_make_random = tegra_aes_get_random,
-			.rng_reset = tegra_aes_rng_reset,
-			.seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
-		}
-	}
-};
-
-static int tegra_aes_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct tegra_aes_dev *dd;
-	struct resource *res;
-	int err = -ENOMEM, i = 0, j;
-
-	dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL);
-	if (dd == NULL) {
-		dev_err(dev, "unable to alloc data struct.\n");
-		return err;
-	}
-
-	dd->dev = dev;
-	platform_set_drvdata(pdev, dd);
-
-	dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) *
-				 AES_NR_KEYSLOTS, GFP_KERNEL);
-	if (dd->slots == NULL) {
-		dev_err(dev, "unable to alloc slot struct.\n");
-		goto out;
-	}
-
-	spin_lock_init(&dd->lock);
-	crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);
-
-	/* Get the module base address */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "invalid resource type: base\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res),
-				     dev_name(&pdev->dev))) {
-		dev_err(&pdev->dev, "Couldn't request MEM resource\n");
-		return -ENODEV;
-	}
-
-	dd->io_base = devm_ioremap(dev, res->start, resource_size(res));
-	if (!dd->io_base) {
-		dev_err(dev, "can't ioremap register space\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* Initialize the vde clock */
-	dd->aes_clk = devm_clk_get(dev, "vde");
-	if (IS_ERR(dd->aes_clk)) {
-		dev_err(dev, "clock initialization failed.\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	err = clk_set_rate(dd->aes_clk, ULONG_MAX);
-	if (err) {
-		dev_err(dd->dev, "iclk set_rate fail(%d)\n", err);
-		goto out;
-	}
-
-	/*
-	 * One contiguous memory region is allocated as follows -
-	 * - hardware key table
-	 * - key schedule
-	 */
-	dd->ivkey_base = dma_alloc_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
-					    &dd->ivkey_phys_base,
-					    GFP_KERNEL);
-	if (!dd->ivkey_base) {
-		dev_err(dev, "can not allocate iv/key buffer\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-					&dd->dma_buf_in, GFP_KERNEL);
-	if (!dd->buf_in) {
-		dev_err(dev, "can not allocate dma-in buffer\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-					 &dd->dma_buf_out, GFP_KERNEL);
-	if (!dd->buf_out) {
-		dev_err(dev, "can not allocate dma-out buffer\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	init_completion(&dd->op_complete);
-	aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
-	if (!aes_wq) {
-		dev_err(dev, "alloc_workqueue failed\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* get the irq */
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!res) {
-		dev_err(dev, "invalid resource type: irq\n");
-		err = -ENODEV;
-		goto out;
-	}
-	dd->irq = res->start;
-
-	err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH |
-				IRQF_SHARED, "tegra-aes", dd);
-	if (err) {
-		dev_err(dev, "request_irq failed\n");
-		goto out;
-	}
-
-	mutex_init(&aes_lock);
-	INIT_LIST_HEAD(&dev_list);
-
-	spin_lock_init(&list_lock);
-	spin_lock(&list_lock);
-	for (i = 0; i < AES_NR_KEYSLOTS; i++) {
-		if (i == SSK_SLOT_NUM)
-			continue;
-		dd->slots[i].slot_num = i;
-		INIT_LIST_HEAD(&dd->slots[i].node);
-		list_add_tail(&dd->slots[i].node, &dev_list);
-	}
-	spin_unlock(&list_lock);
-
-	aes_dev = dd;
-	for (i = 0; i < ARRAY_SIZE(algs); i++) {
-		algs[i].cra_priority = 300;
-		algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
-		algs[i].cra_module = THIS_MODULE;
-		algs[i].cra_init = tegra_aes_cra_init;
-		algs[i].cra_exit = tegra_aes_cra_exit;
-
-		err = crypto_register_alg(&algs[i]);
-		if (err)
-			goto out;
-	}
-
-	dev_info(dev, "registered");
-	return 0;
-
-out:
-	for (j = 0; j < i; j++)
-		crypto_unregister_alg(&algs[j]);
-	if (dd->ivkey_base)
-		dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
-			dd->ivkey_base, dd->ivkey_phys_base);
-	if (dd->buf_in)
-		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-			dd->buf_in, dd->dma_buf_in);
-	if (dd->buf_out)
-		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-			dd->buf_out, dd->dma_buf_out);
-	if (aes_wq)
-		destroy_workqueue(aes_wq);
-	spin_lock(&list_lock);
-	list_del(&dev_list);
-	spin_unlock(&list_lock);
-
-	aes_dev = NULL;
-
-	dev_err(dev, "%s: initialization failed.\n", __func__);
-	return err;
-}
-
-static int tegra_aes_remove(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(algs); i++)
-		crypto_unregister_alg(&algs[i]);
-
-	cancel_work_sync(&aes_work);
-	destroy_workqueue(aes_wq);
-	spin_lock(&list_lock);
-	list_del(&dev_list);
-	spin_unlock(&list_lock);
-
-	dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
-			  dd->ivkey_base, dd->ivkey_phys_base);
-	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-			  dd->buf_in, dd->dma_buf_in);
-	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
-			  dd->buf_out, dd->dma_buf_out);
-	aes_dev = NULL;
-
-	return 0;
-}
-
-static struct of_device_id tegra_aes_of_match[] = {
-	{ .compatible = "nvidia,tegra20-aes", },
-	{ .compatible = "nvidia,tegra30-aes", },
-	{ },
-};
-
-static struct platform_driver tegra_aes_driver = {
-	.probe = tegra_aes_probe,
-	.remove = tegra_aes_remove,
-	.driver = {
-		.name = "tegra-aes",
-		.owner = THIS_MODULE,
-		.of_match_table = tegra_aes_of_match,
-	},
-};
-
-module_platform_driver(tegra_aes_driver);
-
-MODULE_DESCRIPTION("Tegra AES/OFB/CPRNG hw acceleration support.");
-MODULE_AUTHOR("NVIDIA Corporation");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/tegra-aes.h b/drivers/crypto/tegra-aes.h
deleted file mode 100644
index 6006333a8934..000000000000
--- a/drivers/crypto/tegra-aes.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#ifndef __CRYPTODEV_TEGRA_AES_H
-#define __CRYPTODEV_TEGRA_AES_H
-
-#define TEGRA_AES_ICMDQUE_WR			0x1000
-#define TEGRA_AES_CMDQUE_CONTROL		0x1008
-#define TEGRA_AES_INTR_STATUS			0x1018
-#define TEGRA_AES_INT_ENB			0x1040
-#define TEGRA_AES_CONFIG			0x1044
-#define TEGRA_AES_IRAM_ACCESS_CFG		0x10A0
-#define TEGRA_AES_SECURE_DEST_ADDR		0x1100
-#define TEGRA_AES_SECURE_INPUT_SELECT		0x1104
-#define TEGRA_AES_SECURE_CONFIG			0x1108
-#define TEGRA_AES_SECURE_CONFIG_EXT		0x110C
-#define TEGRA_AES_SECURE_SECURITY		0x1110
-#define TEGRA_AES_SECURE_HASH_RESULT0		0x1120
-#define TEGRA_AES_SECURE_HASH_RESULT1		0x1124
-#define TEGRA_AES_SECURE_HASH_RESULT2		0x1128
-#define TEGRA_AES_SECURE_HASH_RESULT3		0x112C
-#define TEGRA_AES_SECURE_SEC_SEL0		0x1140
-#define TEGRA_AES_SECURE_SEC_SEL1		0x1144
-#define TEGRA_AES_SECURE_SEC_SEL2		0x1148
-#define TEGRA_AES_SECURE_SEC_SEL3		0x114C
-#define TEGRA_AES_SECURE_SEC_SEL4		0x1150
-#define TEGRA_AES_SECURE_SEC_SEL5		0x1154
-#define TEGRA_AES_SECURE_SEC_SEL6		0x1158
-#define TEGRA_AES_SECURE_SEC_SEL7		0x115C
-
-/* interrupt status reg masks and shifts */
-#define TEGRA_AES_ENGINE_BUSY_FIELD		BIT(0)
-#define TEGRA_AES_ICQ_EMPTY_FIELD		BIT(3)
-#define TEGRA_AES_DMA_BUSY_FIELD		BIT(23)
-
-/* secure select reg masks and shifts */
-#define TEGRA_AES_SECURE_SEL0_KEYREAD_ENB0_FIELD	BIT(0)
-
-/* secure config ext masks and shifts */
-#define TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD	BIT(15)
-
-/* secure config masks and shifts */
-#define TEGRA_AES_SECURE_KEY_INDEX_SHIFT	20
-#define TEGRA_AES_SECURE_KEY_INDEX_FIELD	(0x1F << TEGRA_AES_SECURE_KEY_INDEX_SHIFT)
-#define TEGRA_AES_SECURE_BLOCK_CNT_SHIFT	0
-#define TEGRA_AES_SECURE_BLOCK_CNT_FIELD	(0xFFFFF << TEGRA_AES_SECURE_BLOCK_CNT_SHIFT)
-
-/* stream interface select masks and shifts */
-#define TEGRA_AES_CMDQ_CTRL_UCMDQEN_FIELD	BIT(0)
-#define TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD	BIT(1)
-#define TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD	BIT(4)
-#define TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD	BIT(5)
-
-/* config register masks and shifts */
-#define TEGRA_AES_CONFIG_ENDIAN_ENB_FIELD	BIT(10)
-#define TEGRA_AES_CONFIG_MODE_SEL_SHIFT		0
-#define TEGRA_AES_CONFIG_MODE_SEL_FIELD		(0x1F << TEGRA_AES_CONFIG_MODE_SEL_SHIFT)
-
-/* extended config */
-#define TEGRA_AES_SECURE_OFFSET_CNT_SHIFT	24
-#define TEGRA_AES_SECURE_OFFSET_CNT_FIELD	(0xFF << TEGRA_AES_SECURE_OFFSET_CNT_SHIFT)
-#define TEGRA_AES_SECURE_KEYSCHED_GEN_FIELD	BIT(15)
-
-/* init vector select */
-#define TEGRA_AES_SECURE_IV_SELECT_SHIFT	10
-#define TEGRA_AES_SECURE_IV_SELECT_FIELD	BIT(10)
-
-/* secure engine input */
-#define TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT	28
-#define TEGRA_AES_SECURE_INPUT_ALG_SEL_FIELD	(0xF << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT)
-#define TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT	16
-#define TEGRA_AES_SECURE_INPUT_KEY_LEN_FIELD	(0xFFF << TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT)
-#define TEGRA_AES_SECURE_RNG_ENB_FIELD		BIT(11)
-#define TEGRA_AES_SECURE_CORE_SEL_SHIFT		9
-#define TEGRA_AES_SECURE_CORE_SEL_FIELD		BIT(9)
-#define TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT	7
-#define TEGRA_AES_SECURE_VCTRAM_SEL_FIELD	(0x3 << TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT)
-#define TEGRA_AES_SECURE_INPUT_SEL_SHIFT	5
-#define TEGRA_AES_SECURE_INPUT_SEL_FIELD	(0x3 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT)
-#define TEGRA_AES_SECURE_XOR_POS_SHIFT		3
-#define TEGRA_AES_SECURE_XOR_POS_FIELD		(0x3 << TEGRA_AES_SECURE_XOR_POS_SHIFT)
-#define TEGRA_AES_SECURE_HASH_ENB_FIELD		BIT(2)
-#define TEGRA_AES_SECURE_ON_THE_FLY_FIELD	BIT(0)
-
-/* interrupt error mask */
-#define TEGRA_AES_INT_ERROR_MASK	0xFFF000
-
-#endif