Diffstat (limited to 'drivers/crypto/tegra-se.c')
-rw-r--r--  drivers/crypto/tegra-se.c | 2455
1 file changed, 2455 insertions(+), 0 deletions(-)
diff --git a/drivers/crypto/tegra-se.c b/drivers/crypto/tegra-se.c
new file mode 100644
index 00000000000..3d2e9187b94
--- /dev/null
+++ b/drivers/crypto/tegra-se.c
@@ -0,0 +1,2455 @@
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * drivers/crypto/tegra-se.c | ||
4 | * | ||
5 | * Support for Tegra Security Engine hardware crypto algorithms. | ||
6 | * | ||
7 | * Copyright (c) 2011, NVIDIA Corporation. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/clk.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | #include <linux/scatterlist.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | #include <linux/io.h> | ||
33 | #include <linux/mutex.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/types.h> | ||
37 | #include <crypto/scatterwalk.h> | ||
38 | #include <crypto/algapi.h> | ||
39 | #include <crypto/aes.h> | ||
40 | #include <crypto/internal/rng.h> | ||
41 | #include <crypto/internal/hash.h> | ||
42 | #include <crypto/sha.h> | ||
43 | #include <linux/pm_runtime.h> | ||
44 | |||
45 | #include "tegra-se.h" | ||
46 | |||
47 | #define DRIVER_NAME "tegra-se" | ||
48 | |||
49 | /* Security Engine operation modes */ | ||
50 | enum tegra_se_aes_op_mode { | ||
51 | SE_AES_OP_MODE_CBC, /* Cipher Block Chaining (CBC) mode */ | ||
52 | SE_AES_OP_MODE_ECB, /* Electronic Codebook (ECB) mode */ | ||
53 | SE_AES_OP_MODE_CTR, /* Counter (CTR) mode */ | ||
54 | SE_AES_OP_MODE_OFB, /* Output feedback (OFB) mode */ | ||
55 | SE_AES_OP_MODE_RNG_X931, /* Random number generator (RNG) mode */ | ||
56 | SE_AES_OP_MODE_CMAC, /* Cipher-based MAC (CMAC) mode */ | ||
57 | SE_AES_OP_MODE_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */ | ||
58 | SE_AES_OP_MODE_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */ | ||
59 | SE_AES_OP_MODE_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */ | ||
60 | SE_AES_OP_MODE_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */ | ||
61 | SE_AES_OP_MODE_SHA512 /* Secure Hash Algorithm-512 (SHA512) mode */ | ||
62 | }; | ||
63 | |||
64 | /* Security Engine key table type */ | ||
65 | enum tegra_se_key_table_type { | ||
66 | SE_KEY_TABLE_TYPE_KEY, /* Key */ | ||
67 | SE_KEY_TABLE_TYPE_ORGIV, /* Original IV */ | ||
68 | SE_KEY_TABLE_TYPE_UPDTDIV /* Updated IV */ | ||
69 | }; | ||
70 | |||
71 | /* Security Engine request context */ | ||
72 | struct tegra_se_req_context { | ||
73 | enum tegra_se_aes_op_mode op_mode; /* Security Engine operation mode */ | ||
74 | bool encrypt; /* Operation type */ | ||
75 | }; | ||
76 | |||
77 | struct tegra_se_dev { | ||
78 | struct device *dev; | ||
79 | void __iomem *io_reg; /* se device memory/io */ | ||
80 | void __iomem *pmc_io_reg; /* pmc device memory/io */ | ||
81 | int irq; /* irq allocated */ | ||
82 | spinlock_t lock; /* spin lock */ | ||
83 | struct clk *pclk; /* Security Engine clock */ | ||
84 | struct crypto_queue queue; /* Security Engine crypto queue */ | ||
85 | struct tegra_se_slot *slot_list; /* pointer to key slots */ | ||
86 | u64 ctr; | ||
87 | u32 *src_ll_buf; /* pointer to source linked list buffer */ | ||
88 | dma_addr_t src_ll_buf_adr; /* Source linked list buffer dma address */ | ||
89 | u32 src_ll_size; /* Size of source linked list buffer */ | ||
90 | u32 *dst_ll_buf; /* pointer to destination linked list buffer */ | ||
91 | dma_addr_t dst_ll_buf_adr; /* Destination linked list dma address */ | ||
92 | u32 dst_ll_size; /* Size of destination linked list buffer */ | ||
93 | u32 *ctx_save_buf; /* LP context buffer pointer*/ | ||
94 | dma_addr_t ctx_save_buf_adr; /* LP context buffer dma address*/ | ||
95 | struct completion complete; /* Tells the task completion */ | ||
96 | bool work_q_busy; /* Work queue busy status */ | ||
97 | }; | ||
98 | |||
99 | static struct tegra_se_dev *sg_tegra_se_dev; | ||
100 | |||
101 | /* Security Engine AES context */ | ||
102 | struct tegra_se_aes_context { | ||
103 | struct tegra_se_dev *se_dev; /* Security Engine device */ | ||
104 | struct tegra_se_slot *slot; /* Security Engine key slot */ | ||
105 | u32 keylen; /* key length in bytes */ | ||
106 | u32 op_mode; /* AES operation mode */ | ||
107 | }; | ||
108 | |||
109 | /* Security Engine random number generator context */ | ||
110 | struct tegra_se_rng_context { | ||
111 | struct tegra_se_dev *se_dev; /* Security Engine device */ | ||
112 | struct tegra_se_slot *slot; /* Security Engine key slot */ | ||
113 | u32 *dt_buf; /* Destination buffer pointer */ | ||
114 | dma_addr_t dt_buf_adr; /* Destination buffer dma address */ | ||
115 | u32 *rng_buf; /* RNG buffer pointer */ | ||
116 | dma_addr_t rng_buf_adr; /* RNG buffer dma address */ | ||
117 | bool use_org_iv; /* Tells whether the original IV is to be | ||
118 | used. If false, the updated IV is used */ | ||
119 | }; | ||
120 | |||
121 | /* Security Engine SHA context */ | ||
122 | struct tegra_se_sha_context { | ||
123 | struct tegra_se_dev *se_dev; /* Security Engine device */ | ||
124 | u32 op_mode; /* SHA operation mode */ | ||
125 | }; | ||
126 | |||
127 | /* Security Engine AES CMAC context */ | ||
128 | struct tegra_se_aes_cmac_context { | ||
129 | struct tegra_se_dev *se_dev; /* Security Engine device */ | ||
130 | struct tegra_se_slot *slot; /* Security Engine key slot */ | ||
131 | u32 keylen; /* key length in bytes */ | ||
132 | u8 K1[TEGRA_SE_KEY_128_SIZE]; /* Key1 */ | ||
133 | u8 K2[TEGRA_SE_KEY_128_SIZE]; /* Key2 */ | ||
134 | dma_addr_t dma_addr; /* DMA address of local buffer */ | ||
135 | u32 buflen; /* local buffer length */ | ||
136 | u8 *buffer; /* local buffer pointer */ | ||
137 | }; | ||
138 | |||
139 | /* Security Engine key slot */ | ||
140 | struct tegra_se_slot { | ||
141 | struct list_head node; | ||
142 | u8 slot_num; /* Key slot number */ | ||
143 | bool available; /* Tells whether key slot is free to use */ | ||
144 | }; | ||
145 | |||
146 | static struct tegra_se_slot ssk_slot = { | ||
147 | .slot_num = 15, | ||
148 | .available = false, | ||
149 | }; | ||
150 | |||
151 | static struct tegra_se_slot srk_slot = { | ||
152 | .slot_num = 0, | ||
153 | .available = false, | ||
154 | }; | ||
155 | |||
156 | /* Security Engine Linked List */ | ||
157 | struct tegra_se_ll { | ||
158 | dma_addr_t addr; /* DMA buffer address */ | ||
159 | u32 data_len; /* Data length in DMA buffer */ | ||
160 | }; | ||
161 | |||
162 | static LIST_HEAD(key_slot); | ||
163 | static DEFINE_SPINLOCK(key_slot_lock); | ||
164 | static DEFINE_MUTEX(se_hw_lock); | ||
165 | |||
166 | /* create a work for handling the async transfers */ | ||
167 | static void tegra_se_work_handler(struct work_struct *work); | ||
168 | static DECLARE_WORK(se_work, tegra_se_work_handler); | ||
169 | static struct workqueue_struct *se_work_q; | ||
170 | |||
171 | #define PMC_SCRATCH43_REG_OFFSET 0x22c | ||
172 | #define GET_MSB(x) ((x) >> (8*sizeof(x)-1)) | ||
173 | static void tegra_se_leftshift_onebit(u8 *in_buf, u32 size, u8 *org_msb) | ||
174 | { | ||
175 | u8 carry; | ||
176 | u32 i; | ||
177 | |||
178 | *org_msb = GET_MSB(in_buf[0]); | ||
179 | |||
180 | /* left shift one bit */ | ||
181 | in_buf[0] <<= 1; | ||
182 | for (carry = 0, i = 1; i < size; i++) { | ||
183 | carry = GET_MSB(in_buf[i]); | ||
184 | in_buf[i-1] |= carry; | ||
185 | in_buf[i] <<= 1; | ||
186 | } | ||
187 | } | ||
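
This is the byte-wise doubling step used later for CMAC subkey derivation: the MSB of byte 0 is reported through org_msb, and each byte inherits the carry-out of the byte to its right. A minimal standalone sketch of the same operation (plain C, illustrative only, not driver code):

    #include <stdint.h>

    /* Shift a buffer left by one bit; return the bit shifted out in front. */
    static uint8_t leftshift_onebit(uint8_t *buf, unsigned int size)
    {
            uint8_t msb = buf[0] >> 7;
            unsigned int i;

            for (i = 0; i < size - 1; i++)
                    buf[i] = (uint8_t)((buf[i] << 1) | (buf[i + 1] >> 7));
            buf[size - 1] <<= 1;
            return msb;
    }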
188 | |||
189 | extern unsigned long long tegra_chip_uid(void); | ||
190 | |||
191 | static inline void se_writel(struct tegra_se_dev *se_dev, | ||
192 | unsigned int val, unsigned int reg_offset) | ||
193 | { | ||
194 | writel(val, se_dev->io_reg + reg_offset); | ||
195 | } | ||
196 | |||
197 | static inline unsigned int se_readl(struct tegra_se_dev *se_dev, | ||
198 | unsigned int reg_offset) | ||
199 | { | ||
200 | unsigned int val; | ||
201 | |||
202 | val = readl(se_dev->io_reg + reg_offset); | ||
203 | |||
204 | return val; | ||
205 | } | ||
206 | |||
207 | static void tegra_se_free_key_slot(struct tegra_se_slot *slot) | ||
208 | { | ||
209 | if (slot) { | ||
210 | spin_lock(&key_slot_lock); | ||
211 | slot->available = true; | ||
212 | spin_unlock(&key_slot_lock); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static struct tegra_se_slot *tegra_se_alloc_key_slot(void) | ||
217 | { | ||
218 | struct tegra_se_slot *slot = NULL; | ||
219 | bool found = false; | ||
220 | |||
221 | spin_lock(&key_slot_lock); | ||
222 | list_for_each_entry(slot, &key_slot, node) { | ||
223 | if (slot->available) { | ||
224 | slot->available = false; | ||
225 | found = true; | ||
226 | break; | ||
227 | } | ||
228 | } | ||
229 | spin_unlock(&key_slot_lock); | ||
230 | return found ? slot : NULL; | ||
231 | } | ||
232 | |||
233 | static int tegra_init_key_slot(struct tegra_se_dev *se_dev) | ||
234 | { | ||
235 | int i; | ||
236 | |||
237 | se_dev->slot_list = kzalloc(sizeof(struct tegra_se_slot) * | ||
238 | TEGRA_SE_KEYSLOT_COUNT, GFP_KERNEL); | ||
239 | if (se_dev->slot_list == NULL) { | ||
240 | dev_err(se_dev->dev, "slot list memory allocation failed\n"); | ||
241 | return -ENOMEM; | ||
242 | } | ||
243 | spin_lock_init(&key_slot_lock); | ||
244 | spin_lock(&key_slot_lock); | ||
245 | for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) { | ||
246 | /* | ||
247 | * Slot 0 and 15 are reserved and will not be added to the | ||
248 | * free slots pool. Slot 0 is used for SRK generation and | ||
249 | * Slot 15 is used for SSK operation | ||
250 | */ | ||
251 | if ((i == srk_slot.slot_num) || (i == ssk_slot.slot_num)) | ||
252 | continue; | ||
253 | se_dev->slot_list[i].available = true; | ||
254 | se_dev->slot_list[i].slot_num = i; | ||
255 | INIT_LIST_HEAD(&se_dev->slot_list[i].node); | ||
256 | list_add_tail(&se_dev->slot_list[i].node, &key_slot); | ||
257 | } | ||
258 | spin_unlock(&key_slot_lock); | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static void tegra_se_key_read_disable(u8 slot_num) | ||
264 | { | ||
265 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
266 | u32 val; | ||
267 | |||
268 | val = se_readl(se_dev, | ||
269 | (SE_KEY_TABLE_ACCESS_REG_OFFSET + (slot_num * 4))); | ||
270 | val &= ~(1 << SE_KEY_READ_DISABLE_SHIFT); | ||
271 | se_writel(se_dev, | ||
272 | val, (SE_KEY_TABLE_ACCESS_REG_OFFSET + (slot_num * 4))); | ||
273 | } | ||
274 | |||
275 | static void tegra_se_key_read_disable_all(void) | ||
276 | { | ||
277 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
278 | u8 slot_num; | ||
279 | |||
280 | mutex_lock(&se_hw_lock); | ||
281 | pm_runtime_get_sync(se_dev->dev); | ||
282 | |||
283 | for (slot_num = 0; slot_num < TEGRA_SE_KEYSLOT_COUNT; slot_num++) | ||
284 | tegra_se_key_read_disable(slot_num); | ||
285 | |||
286 | pm_runtime_put(se_dev->dev); | ||
287 | mutex_unlock(&se_hw_lock); | ||
288 | } | ||
289 | |||
290 | static void tegra_se_config_algo(struct tegra_se_dev *se_dev, | ||
291 | enum tegra_se_aes_op_mode mode, bool encrypt, u32 key_len) | ||
292 | { | ||
293 | u32 val = 0; | ||
294 | |||
295 | switch (mode) { | ||
296 | case SE_AES_OP_MODE_CBC: | ||
297 | case SE_AES_OP_MODE_CMAC: | ||
298 | if (encrypt) { | ||
299 | val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); | ||
300 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
301 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
302 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
303 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
304 | else | ||
305 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
306 | val |= SE_CONFIG_DEC_ALG(ALG_NOP); | ||
307 | } else { | ||
308 | val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); | ||
309 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
310 | val |= SE_CONFIG_DEC_MODE(MODE_KEY256); | ||
311 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
312 | val |= SE_CONFIG_DEC_MODE(MODE_KEY192); | ||
313 | else | ||
314 | val |= SE_CONFIG_DEC_MODE(MODE_KEY128); | ||
315 | } | ||
316 | if (mode == SE_AES_OP_MODE_CMAC) | ||
317 | val |= SE_CONFIG_DST(DST_HASHREG); | ||
318 | else | ||
319 | val |= SE_CONFIG_DST(DST_MEMORY); | ||
320 | break; | ||
321 | case SE_AES_OP_MODE_RNG_X931: | ||
322 | val = SE_CONFIG_ENC_ALG(ALG_RNG) | | ||
323 | SE_CONFIG_ENC_MODE(MODE_KEY128) | | ||
324 | SE_CONFIG_DST(DST_MEMORY); | ||
325 | break; | ||
326 | case SE_AES_OP_MODE_ECB: | ||
327 | if (encrypt) { | ||
328 | val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); | ||
329 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
330 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
331 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
332 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
333 | else | ||
334 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
335 | } else { | ||
336 | val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); | ||
337 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
338 | val |= SE_CONFIG_DEC_MODE(MODE_KEY256); | ||
339 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
340 | val |= SE_CONFIG_DEC_MODE(MODE_KEY192); | ||
341 | else | ||
342 | val |= SE_CONFIG_DEC_MODE(MODE_KEY128); | ||
343 | } | ||
344 | val |= SE_CONFIG_DST(DST_MEMORY); | ||
345 | break; | ||
346 | case SE_AES_OP_MODE_CTR: | ||
347 | if (encrypt) { | ||
348 | val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); | ||
349 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
350 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
351 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
352 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
353 | else | ||
354 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
355 | } else { | ||
356 | val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); | ||
357 | if (key_len == TEGRA_SE_KEY_256_SIZE) { | ||
358 | val |= SE_CONFIG_DEC_MODE(MODE_KEY256); | ||
359 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
360 | } else if (key_len == TEGRA_SE_KEY_192_SIZE) { | ||
361 | val |= SE_CONFIG_DEC_MODE(MODE_KEY192); | ||
362 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
363 | } else { | ||
364 | val |= SE_CONFIG_DEC_MODE(MODE_KEY128); | ||
365 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
366 | } | ||
367 | } | ||
368 | val |= SE_CONFIG_DST(DST_MEMORY); | ||
369 | break; | ||
370 | case SE_AES_OP_MODE_OFB: | ||
371 | if (encrypt) { | ||
372 | val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); | ||
373 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
374 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
375 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
376 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
377 | else | ||
378 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
379 | } else { | ||
380 | val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); | ||
381 | if (key_len == TEGRA_SE_KEY_256_SIZE) { | ||
382 | val |= SE_CONFIG_DEC_MODE(MODE_KEY256); | ||
383 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
384 | } else if (key_len == TEGRA_SE_KEY_192_SIZE) { | ||
385 | val |= SE_CONFIG_DEC_MODE(MODE_KEY192); | ||
386 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
387 | } else { | ||
388 | val |= SE_CONFIG_DEC_MODE(MODE_KEY128); | ||
389 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
390 | } | ||
391 | } | ||
392 | val |= SE_CONFIG_DST(DST_MEMORY); | ||
393 | break; | ||
394 | case SE_AES_OP_MODE_SHA1: | ||
395 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
396 | SE_CONFIG_ENC_MODE(MODE_SHA1) | | ||
397 | SE_CONFIG_DST(DST_HASHREG); | ||
398 | break; | ||
399 | case SE_AES_OP_MODE_SHA224: | ||
400 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
401 | SE_CONFIG_ENC_MODE(MODE_SHA224) | | ||
402 | SE_CONFIG_DST(DST_HASHREG); | ||
403 | break; | ||
404 | case SE_AES_OP_MODE_SHA256: | ||
405 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
406 | SE_CONFIG_ENC_MODE(MODE_SHA256) | | ||
407 | SE_CONFIG_DST(DST_HASHREG); | ||
408 | break; | ||
409 | case SE_AES_OP_MODE_SHA384: | ||
410 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
411 | SE_CONFIG_ENC_MODE(MODE_SHA384) | | ||
412 | SE_CONFIG_DST(DST_HASHREG); | ||
413 | break; | ||
414 | case SE_AES_OP_MODE_SHA512: | ||
415 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
416 | SE_CONFIG_ENC_MODE(MODE_SHA512) | | ||
417 | SE_CONFIG_DST(DST_HASHREG); | ||
418 | break; | ||
419 | default: | ||
420 | dev_warn(se_dev->dev, "Invalid operation mode\n"); | ||
421 | break; | ||
422 | } | ||
423 | |||
424 | se_writel(se_dev, val, SE_CONFIG_REG_OFFSET); | ||
425 | } | ||
426 | |||
427 | static void tegra_se_write_seed(struct tegra_se_dev *se_dev, u32 *pdata) | ||
428 | { | ||
429 | u32 i; | ||
430 | |||
431 | for (i = 0; i < SE_CRYPTO_CTR_REG_COUNT; i++) | ||
432 | se_writel(se_dev, pdata[i], SE_CRYPTO_CTR_REG_OFFSET + (i * 4)); | ||
433 | } | ||
434 | |||
435 | static void tegra_se_write_key_table(u8 *pdata, u32 data_len, | ||
436 | u8 slot_num, enum tegra_se_key_table_type type) | ||
437 | { | ||
438 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
439 | u32 data_size = SE_KEYTABLE_REG_MAX_DATA; | ||
440 | u32 *pdata_buf = (u32 *)pdata; | ||
441 | u8 pkt = 0, quad = 0; | ||
442 | u32 val = 0, i; | ||
443 | |||
444 | if ((type == SE_KEY_TABLE_TYPE_KEY) && (slot_num == ssk_slot.slot_num)) | ||
445 | return; | ||
446 | |||
447 | if (type == SE_KEY_TABLE_TYPE_ORGIV) | ||
448 | quad = QUAD_ORG_IV; | ||
449 | else if (type == SE_KEY_TABLE_TYPE_UPDTDIV) | ||
450 | quad = QUAD_UPDTD_IV; | ||
451 | else | ||
452 | quad = QUAD_KEYS_128; | ||
453 | |||
454 | /* write data to the key table */ | ||
455 | do { | ||
456 | for (i = 0; i < data_size; i += 4, data_len -= 4) | ||
457 | se_writel(se_dev, *pdata_buf++, | ||
458 | SE_KEYTABLE_DATA0_REG_OFFSET + i); | ||
459 | |||
460 | pkt = SE_KEYTABLE_SLOT(slot_num) | SE_KEYTABLE_QUAD(quad); | ||
461 | val = SE_KEYTABLE_OP_TYPE(OP_WRITE) | | ||
462 | SE_KEYTABLE_TABLE_SEL(TABLE_KEYIV) | | ||
463 | SE_KEYTABLE_PKT(pkt); | ||
464 | |||
465 | se_writel(se_dev, val, SE_KEYTABLE_REG_OFFSET); | ||
466 | |||
467 | data_size = data_len; | ||
468 | quad = QUAD_KEYS_256; | ||
469 | |||
470 | } while (data_len); | ||
471 | } | ||
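
The loop above writes the key table one 128-bit quad at a time: the first pass fills SE_KEYTABLE_DATA0..3 and commits an OP_WRITE packet for the low quad, and a 256-bit key takes a second pass into QUAD_KEYS_256. A trace of the two passes for a 32-byte key (a sketch; values are symbolic):

    /* Illustration (not driver code): quad sequencing for an AES-256 key. */
    u32 data_len = 32, data_size = 16;      /* SE_KEYTABLE_REG_MAX_DATA */
    u8 quad = QUAD_KEYS_128;                /* low 128 bits first */

    do {
            /* 16 bytes into SE_KEYTABLE_DATA0..3, then one OP_WRITE commit */
            data_len -= data_size;
            data_size = data_len;           /* at most 16 bytes remain */
            quad = QUAD_KEYS_256;           /* high 128 bits, second pass */
    } while (data_len);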
472 | |||
473 | static void tegra_se_config_crypto(struct tegra_se_dev *se_dev, | ||
474 | enum tegra_se_aes_op_mode mode, bool encrypt, u8 slot_num, bool org_iv) | ||
475 | { | ||
476 | u32 val = 0; | ||
477 | |||
478 | switch (mode) { | ||
479 | case SE_AES_OP_MODE_CMAC: | ||
480 | case SE_AES_OP_MODE_CBC: | ||
481 | if (encrypt) { | ||
482 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
483 | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) | | ||
484 | SE_CRYPTO_XOR_POS(XOR_TOP) | | ||
485 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
486 | } else { | ||
487 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
488 | SE_CRYPTO_VCTRAM_SEL(VCTRAM_PREVAHB) | | ||
489 | SE_CRYPTO_XOR_POS(XOR_BOTTOM) | | ||
490 | SE_CRYPTO_CORE_SEL(CORE_DECRYPT); | ||
491 | } | ||
492 | break; | ||
493 | case SE_AES_OP_MODE_RNG_X931: | ||
494 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
495 | SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
496 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
497 | break; | ||
498 | case SE_AES_OP_MODE_ECB: | ||
499 | if (encrypt) { | ||
500 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
501 | SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
502 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
503 | } else { | ||
504 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
505 | SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
506 | SE_CRYPTO_CORE_SEL(CORE_DECRYPT); | ||
507 | } | ||
508 | break; | ||
509 | case SE_AES_OP_MODE_CTR: | ||
510 | val = SE_CRYPTO_INPUT_SEL(INPUT_LNR_CTR) | | ||
511 | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AHB) | | ||
512 | SE_CRYPTO_XOR_POS(XOR_BOTTOM) | | ||
513 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
514 | break; | ||
515 | case SE_AES_OP_MODE_OFB: | ||
516 | val = SE_CRYPTO_INPUT_SEL(INPUT_AESOUT) | | ||
517 | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AHB) | | ||
518 | SE_CRYPTO_XOR_POS(XOR_BOTTOM) | | ||
519 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
520 | break; | ||
521 | default: | ||
522 | dev_warn(se_dev->dev, "Invalid operation mode\n"); | ||
523 | break; | ||
524 | } | ||
525 | |||
526 | if (mode == SE_AES_OP_MODE_CTR) { | ||
527 | val |= SE_CRYPTO_HASH(HASH_DISABLE) | | ||
528 | SE_CRYPTO_KEY_INDEX(slot_num) | | ||
529 | SE_CRYPTO_CTR_CNTN(1); | ||
530 | } else { | ||
531 | val |= SE_CRYPTO_HASH(HASH_DISABLE) | | ||
532 | SE_CRYPTO_KEY_INDEX(slot_num) | | ||
533 | (org_iv ? SE_CRYPTO_IV_SEL(IV_ORIGINAL) : | ||
534 | SE_CRYPTO_IV_SEL(IV_UPDATED)); | ||
535 | } | ||
536 | |||
537 | /* enable hash for CMAC */ | ||
538 | if (mode == SE_AES_OP_MODE_CMAC) | ||
539 | val |= SE_CRYPTO_HASH(HASH_ENABLE); | ||
540 | |||
541 | se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET); | ||
542 | |||
543 | if (mode == SE_AES_OP_MODE_CTR) | ||
544 | se_writel(se_dev, 1, SE_SPARE_0_REG_OFFSET); | ||
545 | |||
546 | if (mode == SE_AES_OP_MODE_OFB) | ||
547 | se_writel(se_dev, 1, SE_SPARE_0_REG_OFFSET); | ||
548 | |||
549 | } | ||
550 | |||
551 | static void tegra_se_config_sha(struct tegra_se_dev *se_dev, u32 count) | ||
552 | { | ||
553 | int i; | ||
554 | |||
555 | se_writel(se_dev, (count * 8), SE_SHA_MSG_LENGTH_REG_OFFSET); | ||
556 | se_writel(se_dev, (count * 8), SE_SHA_MSG_LEFT_REG_OFFSET); | ||
557 | for (i = 1; i < 4; i++) { | ||
558 | se_writel(se_dev, 0, SE_SHA_MSG_LENGTH_REG_OFFSET + (4 * i)); | ||
559 | se_writel(se_dev, 0, SE_SHA_MSG_LEFT_REG_OFFSET + (4 * i)); | ||
560 | } | ||
561 | se_writel(se_dev, SHA_ENABLE, SE_SHA_CONFIG_REG_OFFSET); | ||
562 | } | ||
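
SE_SHA_MSG_LENGTH/SE_SHA_MSG_LEFT hold a 128-bit bit count, written low word first; only word 0 is non-zero for this single-shot configuration. Note that (count * 8) is 32-bit arithmetic, so the programming above is exact only for messages below 512 MB; a 64-bit sketch of the wider split (illustrative):

    uint64_t bits = (uint64_t)count * 8;    /* message length in bits */
    uint32_t len_words[4] = {
            (uint32_t)bits,                 /* word 0: low 32 bits */
            (uint32_t)(bits >> 32),         /* word 1 */
            0, 0                            /* words 2-3 unused here */
    };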
563 | |||
564 | static int tegra_se_start_operation(struct tegra_se_dev *se_dev, u32 nbytes, | ||
565 | bool context_save) | ||
566 | { | ||
567 | u32 nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE; | ||
568 | int ret = 0; | ||
569 | u32 val = 0; | ||
570 | |||
571 | /* clear any pending interrupts */ | ||
572 | val = se_readl(se_dev, SE_INT_STATUS_REG_OFFSET); | ||
573 | se_writel(se_dev, val, SE_INT_STATUS_REG_OFFSET); | ||
574 | se_writel(se_dev, se_dev->src_ll_buf_adr, SE_IN_LL_ADDR_REG_OFFSET); | ||
575 | se_writel(se_dev, se_dev->dst_ll_buf_adr, SE_OUT_LL_ADDR_REG_OFFSET); | ||
576 | |||
577 | if (nblocks) | ||
578 | se_writel(se_dev, nblocks-1, SE_BLOCK_COUNT_REG_OFFSET); | ||
579 | |||
580 | /* enable interrupts */ | ||
581 | val = SE_INT_ERROR(INT_ENABLE) | SE_INT_OP_DONE(INT_ENABLE); | ||
582 | se_writel(se_dev, val, SE_INT_ENABLE_REG_OFFSET); | ||
583 | |||
584 | INIT_COMPLETION(se_dev->complete); | ||
585 | |||
586 | if (context_save) | ||
587 | se_writel(se_dev, SE_OPERATION(OP_CTX_SAVE), | ||
588 | SE_OPERATION_REG_OFFSET); | ||
589 | else | ||
590 | se_writel(se_dev, SE_OPERATION(OP_SRART), | ||
591 | SE_OPERATION_REG_OFFSET); | ||
592 | |||
593 | ret = wait_for_completion_timeout(&se_dev->complete, | ||
594 | msecs_to_jiffies(1000)); | ||
595 | if (ret == 0) { | ||
596 | dev_err(se_dev->dev, "operation timed out no interrupt\n"); | ||
597 | return -ETIMEDOUT; | ||
598 | } | ||
599 | |||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | static void tegra_se_read_hash_result(struct tegra_se_dev *se_dev, | ||
604 | u8 *pdata, u32 nbytes, bool swap32) | ||
605 | { | ||
606 | u32 *result = (u32 *)pdata; | ||
607 | u32 i; | ||
608 | |||
609 | for (i = 0; i < nbytes/4; i++) { | ||
610 | result[i] = se_readl(se_dev, SE_HASH_RESULT_REG_OFFSET + | ||
611 | (i * sizeof(u32))); | ||
612 | if (swap32) | ||
613 | result[i] = be32_to_cpu(result[i]); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | static int tegra_se_count_sgs(struct scatterlist *sl, u32 total_bytes) | ||
618 | { | ||
619 | int i = 0; | ||
620 | |||
621 | if (!total_bytes) | ||
622 | return 0; | ||
623 | |||
624 | do { | ||
625 | total_bytes -= min(sl[i].length, total_bytes); | ||
626 | i++; | ||
627 | } while (total_bytes); | ||
628 | |||
629 | return i; | ||
630 | } | ||
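
Note the walk indexes the scatterlist as a flat array (sl[i]), which matches how callers in this driver build their lists; a chain-safe variant would advance with sg_next() instead, e.g. (a sketch, hypothetical helper name):

    static int tegra_se_count_sgs_chained(struct scatterlist *sl, u32 total)
    {
            int i = 0;

            while (sl && total) {
                    total -= min(sl->length, total);
                    sl = sg_next(sl);
                    i++;
            }
            return i;
    }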
631 | |||
632 | static int tegra_se_alloc_ll_buf(struct tegra_se_dev *se_dev, | ||
633 | u32 num_src_sgs, u32 num_dst_sgs) | ||
634 | { | ||
635 | if (se_dev->src_ll_buf || se_dev->dst_ll_buf) { | ||
636 | dev_err(se_dev->dev, "trying to allocate memory to allocated memory\n"); | ||
637 | return -EBUSY; | ||
638 | } | ||
639 | |||
640 | if (num_src_sgs) { | ||
641 | se_dev->src_ll_size = | ||
642 | (sizeof(struct tegra_se_ll) * num_src_sgs) + | ||
643 | sizeof(u32); | ||
644 | se_dev->src_ll_buf = dma_alloc_coherent(se_dev->dev, | ||
645 | se_dev->src_ll_size, | ||
646 | &se_dev->src_ll_buf_adr, GFP_KERNEL); | ||
647 | if (!se_dev->src_ll_buf) { | ||
648 | dev_err(se_dev->dev, "can not allocate src lldma buffer\n"); | ||
649 | return -ENOMEM; | ||
650 | } | ||
651 | } | ||
652 | if (num_dst_sgs) { | ||
653 | se_dev->dst_ll_size = | ||
654 | (sizeof(struct tegra_se_ll) * num_dst_sgs) + | ||
655 | sizeof(u32); | ||
656 | se_dev->dst_ll_buf = dma_alloc_coherent(se_dev->dev, | ||
657 | se_dev->dst_ll_size, | ||
658 | &se_dev->dst_ll_buf_adr, GFP_KERNEL); | ||
659 | if (!se_dev->dst_ll_buf) { | ||
660 | dev_err(se_dev->dev, "can not allocate dst ll dma buffer\n"); | ||
661 | return -ENOMEM; | ||
662 | } | ||
663 | } | ||
664 | |||
665 | return 0; | ||
666 | } | ||
667 | |||
668 | static void tegra_se_free_ll_buf(struct tegra_se_dev *se_dev) | ||
669 | { | ||
670 | if (se_dev->src_ll_buf) { | ||
671 | dma_free_coherent(se_dev->dev, se_dev->src_ll_size, | ||
672 | se_dev->src_ll_buf, se_dev->src_ll_buf_adr); | ||
673 | se_dev->src_ll_buf = NULL; | ||
674 | } | ||
675 | |||
676 | if (se_dev->dst_ll_buf) { | ||
677 | dma_free_coherent(se_dev->dev, se_dev->dst_ll_size, | ||
678 | se_dev->dst_ll_buf, se_dev->dst_ll_buf_adr); | ||
679 | se_dev->dst_ll_buf = NULL; | ||
680 | } | ||
681 | } | ||
682 | |||
683 | static int tegra_se_setup_ablk_req(struct tegra_se_dev *se_dev, | ||
684 | struct ablkcipher_request *req) | ||
685 | { | ||
686 | struct scatterlist *src_sg, *dst_sg; | ||
687 | struct tegra_se_ll *src_ll, *dst_ll; | ||
688 | u32 total, num_src_sgs, num_dst_sgs; | ||
689 | int ret = 0; | ||
690 | |||
691 | num_src_sgs = tegra_se_count_sgs(req->src, req->nbytes); | ||
692 | num_dst_sgs = tegra_se_count_sgs(req->dst, req->nbytes); | ||
693 | |||
694 | if ((num_src_sgs > SE_MAX_SRC_SG_COUNT) || | ||
695 | (num_dst_sgs > SE_MAX_DST_SG_COUNT)) { | ||
696 | dev_err(se_dev->dev, "num of SG buffers are more\n"); | ||
697 | return -EINVAL; | ||
698 | } | ||
699 | |||
700 | *se_dev->src_ll_buf = num_src_sgs-1; | ||
701 | *se_dev->dst_ll_buf = num_dst_sgs-1; | ||
702 | |||
703 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
704 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
705 | |||
706 | src_sg = req->src; | ||
707 | dst_sg = req->dst; | ||
708 | total = req->nbytes; | ||
709 | |||
710 | while (total) { | ||
711 | ret = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
712 | if (!ret) { | ||
713 | dev_err(se_dev->dev, "dma_map_sg() error\n"); | ||
714 | return -EINVAL; | ||
715 | } | ||
716 | |||
717 | ret = dma_map_sg(se_dev->dev, dst_sg, 1, DMA_FROM_DEVICE); | ||
718 | if (!ret) { | ||
719 | dev_err(se_dev->dev, "dma_map_sg() error\n"); | ||
720 | dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
721 | return -EINVAL; | ||
722 | } | ||
723 | |||
724 | WARN_ON(src_sg->length != dst_sg->length); | ||
725 | src_ll->addr = sg_dma_address(src_sg); | ||
726 | src_ll->data_len = min(src_sg->length, total); | ||
727 | dst_ll->addr = sg_dma_address(dst_sg); | ||
728 | dst_ll->data_len = min(dst_sg->length, total); | ||
729 | total -= min(src_sg->length, total); | ||
730 | |||
731 | src_sg = sg_next(src_sg); | ||
732 | dst_sg = sg_next(dst_sg); | ||
733 | dst_ll++; | ||
734 | src_ll++; | ||
735 | WARN_ON(((total != 0) && (!src_sg || !dst_sg))); | ||
736 | } | ||
737 | return ret; | ||
738 | } | ||
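
The linked-list buffers handed to the hardware have a simple layout: one u32 holding the entry count minus one, followed by the {address, length} pairs. That is why the code stores num_src_sgs-1 through *src_ll_buf and starts the entries at src_ll_buf + 1. As an illustrative (hypothetical) struct:

    /* Layout consumed via SE_IN/OUT_LL_ADDR (sketch only): */
    struct tegra_se_ll_list {
            u32 last_entry;                 /* number of entries - 1 */
            struct tegra_se_ll entry[];     /* DMA {addr, data_len} pairs */
    };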
739 | |||
740 | static void tegra_se_dequeue_complete_req(struct tegra_se_dev *se_dev, | ||
741 | struct ablkcipher_request *req) | ||
742 | { | ||
743 | struct scatterlist *src_sg, *dst_sg; | ||
744 | u32 total; | ||
745 | |||
746 | if (req) { | ||
747 | src_sg = req->src; | ||
748 | dst_sg = req->dst; | ||
749 | total = req->nbytes; | ||
750 | while (total) { | ||
751 | dma_unmap_sg(se_dev->dev, dst_sg, 1, DMA_FROM_DEVICE); | ||
752 | dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
753 | total -= min(src_sg->length, total); | ||
754 | src_sg = sg_next(src_sg); | ||
755 | dst_sg = sg_next(dst_sg); | ||
756 | } | ||
757 | } | ||
758 | } | ||
759 | |||
760 | static void tegra_se_process_new_req(struct crypto_async_request *async_req) | ||
761 | { | ||
762 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
763 | struct ablkcipher_request *req = ablkcipher_request_cast(async_req); | ||
764 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
765 | struct tegra_se_aes_context *aes_ctx = | ||
766 | crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
767 | int ret = 0; | ||
768 | |||
769 | /* take access to the hw */ | ||
770 | mutex_lock(&se_hw_lock); | ||
771 | |||
772 | /* write IV */ | ||
773 | if (req->info) { | ||
774 | if (req_ctx->op_mode == SE_AES_OP_MODE_CTR) { | ||
775 | tegra_se_write_seed(se_dev, (u32 *)req->info); | ||
776 | } else { | ||
777 | tegra_se_write_key_table(req->info, | ||
778 | TEGRA_SE_AES_IV_SIZE, | ||
779 | aes_ctx->slot->slot_num, | ||
780 | SE_KEY_TABLE_TYPE_ORGIV); | ||
781 | } | ||
782 | } | ||
783 | tegra_se_setup_ablk_req(se_dev, req); | ||
784 | tegra_se_config_algo(se_dev, req_ctx->op_mode, req_ctx->encrypt, | ||
785 | aes_ctx->keylen); | ||
786 | tegra_se_config_crypto(se_dev, req_ctx->op_mode, req_ctx->encrypt, | ||
787 | aes_ctx->slot->slot_num, req->info ? true : false); | ||
788 | ret = tegra_se_start_operation(se_dev, req->nbytes, false); | ||
789 | tegra_se_dequeue_complete_req(se_dev, req); | ||
790 | |||
791 | mutex_unlock(&se_hw_lock); | ||
792 | req->base.complete(&req->base, ret); | ||
793 | } | ||
794 | |||
795 | static irqreturn_t tegra_se_irq(int irq, void *dev) | ||
796 | { | ||
797 | struct tegra_se_dev *se_dev = dev; | ||
798 | u32 val; | ||
799 | |||
800 | val = se_readl(se_dev, SE_INT_STATUS_REG_OFFSET); | ||
801 | se_writel(se_dev, val, SE_INT_STATUS_REG_OFFSET); | ||
802 | |||
803 | if (val & SE_INT_ERROR(INT_SET)) | ||
804 | dev_err(se_dev->dev, "tegra_se_irq::error\n"); | ||
805 | |||
806 | if (val & SE_INT_OP_DONE(INT_SET)) | ||
807 | complete(&se_dev->complete); | ||
808 | |||
809 | return IRQ_HANDLED; | ||
810 | } | ||
811 | |||
812 | static void tegra_se_work_handler(struct work_struct *work) | ||
813 | { | ||
814 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
815 | struct crypto_async_request *async_req = NULL; | ||
816 | struct crypto_async_request *backlog = NULL; | ||
817 | |||
818 | pm_runtime_get_sync(se_dev->dev); | ||
819 | |||
820 | do { | ||
821 | spin_lock_irq(&se_dev->lock); | ||
822 | backlog = crypto_get_backlog(&se_dev->queue); | ||
823 | async_req = crypto_dequeue_request(&se_dev->queue); | ||
824 | if (!async_req) | ||
825 | se_dev->work_q_busy = false; | ||
826 | |||
827 | spin_unlock_irq(&se_dev->lock); | ||
828 | |||
829 | if (backlog) { | ||
830 | backlog->complete(backlog, -EINPROGRESS); | ||
831 | backlog = NULL; | ||
832 | } | ||
833 | |||
834 | if (async_req) { | ||
835 | tegra_se_process_new_req(async_req); | ||
836 | async_req = NULL; | ||
837 | } | ||
838 | } while (se_dev->work_q_busy); | ||
839 | pm_runtime_put(se_dev->dev); | ||
840 | } | ||
841 | |||
842 | static int tegra_se_aes_queue_req(struct ablkcipher_request *req) | ||
843 | { | ||
844 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
845 | unsigned long flags; | ||
846 | bool idle = true; | ||
847 | int err = 0; | ||
848 | |||
849 | if (!req->nbytes) | ||
850 | return -EINVAL; | ||
851 | |||
852 | spin_lock_irqsave(&se_dev->lock, flags); | ||
853 | err = ablkcipher_enqueue_request(&se_dev->queue, req); | ||
854 | if (se_dev->work_q_busy) | ||
855 | idle = false; | ||
856 | spin_unlock_irqrestore(&se_dev->lock, flags); | ||
857 | |||
858 | if (idle) { | ||
859 | spin_lock_irq(&se_dev->lock); | ||
860 | se_dev->work_q_busy = true; | ||
861 | spin_unlock_irq(&se_dev->lock); | ||
862 | queue_work(se_work_q, &se_work); | ||
863 | } | ||
864 | |||
865 | return err; | ||
866 | } | ||
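
All the AES entry points below funnel into this queue. From a client's point of view the driver is reached through the regular kernel crypto API of this era; a hedged usage sketch, assuming the cbc(aes) registration later in this file and omitting error handling:

    struct crypto_ablkcipher *tfm;
    struct ablkcipher_request *areq;

    tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
    areq = ablkcipher_request_alloc(tfm, GFP_KERNEL);
    crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
    ablkcipher_request_set_crypt(areq, src_sg, dst_sg, nbytes, iv);
    crypto_ablkcipher_encrypt(areq);    /* lands in tegra_se_aes_queue_req() */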
867 | |||
868 | static int tegra_se_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
869 | { | ||
870 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
871 | |||
872 | req_ctx->encrypt = true; | ||
873 | req_ctx->op_mode = SE_AES_OP_MODE_CBC; | ||
874 | |||
875 | return tegra_se_aes_queue_req(req); | ||
876 | } | ||
877 | |||
878 | static int tegra_se_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
879 | { | ||
880 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
881 | |||
882 | req_ctx->encrypt = false; | ||
883 | req_ctx->op_mode = SE_AES_OP_MODE_CBC; | ||
884 | |||
885 | return tegra_se_aes_queue_req(req); | ||
886 | } | ||
887 | |||
888 | static int tegra_se_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
889 | { | ||
890 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
891 | |||
892 | req_ctx->encrypt = true; | ||
893 | req_ctx->op_mode = SE_AES_OP_MODE_ECB; | ||
894 | |||
895 | return tegra_se_aes_queue_req(req); | ||
896 | } | ||
897 | |||
898 | static int tegra_se_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
899 | { | ||
900 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
901 | |||
902 | req_ctx->encrypt = false; | ||
903 | req_ctx->op_mode = SE_AES_OP_MODE_ECB; | ||
904 | |||
905 | return tegra_se_aes_queue_req(req); | ||
906 | } | ||
907 | |||
908 | static int tegra_se_aes_ctr_encrypt(struct ablkcipher_request *req) | ||
909 | { | ||
910 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
911 | |||
912 | req_ctx->encrypt = true; | ||
913 | req_ctx->op_mode = SE_AES_OP_MODE_CTR; | ||
914 | |||
915 | return tegra_se_aes_queue_req(req); | ||
916 | } | ||
917 | |||
918 | static int tegra_se_aes_ctr_decrypt(struct ablkcipher_request *req) | ||
919 | { | ||
920 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
921 | |||
922 | req_ctx->encrypt = false; | ||
923 | req_ctx->op_mode = SE_AES_OP_MODE_CTR; | ||
924 | |||
925 | return tegra_se_aes_queue_req(req); | ||
926 | } | ||
927 | |||
928 | static int tegra_se_aes_ofb_encrypt(struct ablkcipher_request *req) | ||
929 | { | ||
930 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
931 | |||
932 | req_ctx->encrypt = true; | ||
933 | req_ctx->op_mode = SE_AES_OP_MODE_OFB; | ||
934 | |||
935 | return tegra_se_aes_queue_req(req); | ||
936 | } | ||
937 | |||
938 | static int tegra_se_aes_ofb_decrypt(struct ablkcipher_request *req) | ||
939 | { | ||
940 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
941 | |||
942 | req_ctx->encrypt = false; | ||
943 | req_ctx->op_mode = SE_AES_OP_MODE_OFB; | ||
944 | |||
945 | return tegra_se_aes_queue_req(req); | ||
946 | } | ||
947 | |||
948 | static int tegra_se_aes_setkey(struct crypto_ablkcipher *tfm, | ||
949 | const u8 *key, u32 keylen) | ||
950 | { | ||
951 | struct tegra_se_aes_context *ctx = crypto_ablkcipher_ctx(tfm); | ||
952 | struct tegra_se_dev *se_dev; | ||
953 | struct tegra_se_slot *pslot; | ||
954 | u8 *pdata = (u8 *)key; | ||
955 | | ||
956 | if (!ctx) { | ||
957 | pr_err("%s: invalid context", DRIVER_NAME); | ||
958 | return -EINVAL; | ||
959 | } | ||
960 | se_dev = ctx->se_dev; | ||
961 | if ((keylen != TEGRA_SE_KEY_128_SIZE) && | ||
962 | (keylen != TEGRA_SE_KEY_192_SIZE) && | ||
963 | (keylen != TEGRA_SE_KEY_256_SIZE)) { | ||
964 | dev_err(se_dev->dev, "invalid key size"); | ||
965 | return -EINVAL; | ||
966 | } | ||
967 | |||
968 | if (key) { | ||
969 | if (!ctx->slot || (ctx->slot && | ||
970 | ctx->slot->slot_num == ssk_slot.slot_num)) { | ||
971 | pslot = tegra_se_alloc_key_slot(); | ||
972 | if (!pslot) { | ||
973 | dev_err(se_dev->dev, "no free key slot\n"); | ||
974 | return -ENOMEM; | ||
975 | } | ||
976 | ctx->slot = pslot; | ||
977 | } | ||
978 | ctx->keylen = keylen; | ||
979 | } else { | ||
980 | tegra_se_free_key_slot(ctx->slot); | ||
981 | ctx->slot = &ssk_slot; | ||
982 | ctx->keylen = AES_KEYSIZE_128; | ||
983 | } | ||
984 | |||
985 | /* take access to the hw */ | ||
986 | mutex_lock(&se_hw_lock); | ||
987 | pm_runtime_get_sync(se_dev->dev); | ||
988 | |||
989 | /* load the key */ | ||
990 | tegra_se_write_key_table(pdata, keylen, ctx->slot->slot_num, | ||
991 | SE_KEY_TABLE_TYPE_KEY); | ||
992 | |||
993 | pm_runtime_put(se_dev->dev); | ||
994 | mutex_unlock(&se_hw_lock); | ||
995 | |||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | static int tegra_se_aes_cra_init(struct crypto_tfm *tfm) | ||
1000 | { | ||
1001 | struct tegra_se_aes_context *ctx = crypto_tfm_ctx(tfm); | ||
1002 | |||
1003 | ctx->se_dev = sg_tegra_se_dev; | ||
1004 | tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_se_req_context); | ||
1005 | |||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
1009 | static void tegra_se_aes_cra_exit(struct crypto_tfm *tfm) | ||
1010 | { | ||
1011 | struct tegra_se_aes_context *ctx = crypto_tfm_ctx(tfm); | ||
1012 | |||
1013 | tegra_se_free_key_slot(ctx->slot); | ||
1014 | ctx->slot = NULL; | ||
1015 | } | ||
1016 | |||
1017 | static int tegra_se_rng_init(struct crypto_tfm *tfm) | ||
1018 | { | ||
1019 | struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm); | ||
1020 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
1021 | |||
1022 | rng_ctx->se_dev = se_dev; | ||
1023 | rng_ctx->dt_buf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1024 | &rng_ctx->dt_buf_adr, GFP_KERNEL); | ||
1025 | if (!rng_ctx->dt_buf) { | ||
1026 | dev_err(se_dev->dev, "can not allocate rng dma buffer"); | ||
1027 | return -ENOMEM; | ||
1028 | } | ||
1029 | |||
1030 | rng_ctx->rng_buf = dma_alloc_coherent(rng_ctx->se_dev->dev, | ||
1031 | TEGRA_SE_RNG_DT_SIZE, &rng_ctx->rng_buf_adr, GFP_KERNEL); | ||
1032 | if (!rng_ctx->rng_buf) { | ||
1033 | dev_err(se_dev->dev, "can not allocate rng dma buffer"); | ||
1034 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1035 | rng_ctx->dt_buf, rng_ctx->dt_buf_adr); | ||
1036 | return -ENOMEM; | ||
1037 | } | ||
1038 | |||
1039 | rng_ctx->slot = tegra_se_alloc_key_slot(); | ||
1040 | |||
1041 | if (!rng_ctx->slot) { | ||
1042 | dev_err(rng_ctx->se_dev->dev, "no free slot\n"); | ||
1043 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1044 | rng_ctx->dt_buf, rng_ctx->dt_buf_adr); | ||
1045 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1046 | rng_ctx->rng_buf, rng_ctx->rng_buf_adr); | ||
1047 | return -ENOMEM; | ||
1048 | } | ||
1049 | |||
1050 | return 0; | ||
1051 | } | ||
1052 | |||
1053 | static void tegra_se_rng_exit(struct crypto_tfm *tfm) | ||
1054 | { | ||
1055 | struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm); | ||
1056 | |||
1057 | if (rng_ctx->dt_buf) { | ||
1058 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1059 | rng_ctx->dt_buf, rng_ctx->dt_buf_adr); | ||
1060 | } | ||
1061 | |||
1062 | if (rng_ctx->rng_buf) { | ||
1063 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1064 | rng_ctx->rng_buf, rng_ctx->rng_buf_adr); | ||
1065 | } | ||
1066 | |||
1067 | tegra_se_free_key_slot(rng_ctx->slot); | ||
1068 | rng_ctx->slot = NULL; | ||
1069 | rng_ctx->se_dev = NULL; | ||
1070 | } | ||
1071 | |||
1072 | static int tegra_se_rng_get_random(struct crypto_rng *tfm, u8 *rdata, u32 dlen) | ||
1073 | { | ||
1074 | struct tegra_se_rng_context *rng_ctx = crypto_rng_ctx(tfm); | ||
1075 | struct tegra_se_dev *se_dev = rng_ctx->se_dev; | ||
1076 | struct tegra_se_ll *src_ll, *dst_ll; | ||
1077 | unsigned char *dt_buf = (unsigned char *)rng_ctx->dt_buf; | ||
1078 | u8 *rdata_addr; | ||
1079 | int ret = 0, i, j, num_blocks, data_len = 0; | ||
1080 | |||
1081 | num_blocks = (dlen / TEGRA_SE_RNG_DT_SIZE); | ||
1082 | |||
1083 | data_len = (dlen % TEGRA_SE_RNG_DT_SIZE); | ||
1084 | if (data_len == 0) | ||
1085 | num_blocks = num_blocks - 1; | ||
1086 | |||
1087 | /* take access to the hw */ | ||
1088 | mutex_lock(&se_hw_lock); | ||
1089 | pm_runtime_get_sync(se_dev->dev); | ||
1090 | |||
1091 | *se_dev->src_ll_buf = 0; | ||
1092 | *se_dev->dst_ll_buf = 0; | ||
1093 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1094 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
1095 | src_ll->addr = rng_ctx->dt_buf_adr; | ||
1096 | src_ll->data_len = TEGRA_SE_RNG_DT_SIZE; | ||
1097 | dst_ll->addr = rng_ctx->rng_buf_adr; | ||
1098 | dst_ll->data_len = TEGRA_SE_RNG_DT_SIZE; | ||
1099 | |||
1100 | tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_X931, true, | ||
1101 | TEGRA_SE_KEY_128_SIZE); | ||
1102 | tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_RNG_X931, true, | ||
1103 | rng_ctx->slot->slot_num, rng_ctx->use_org_iv); | ||
1104 | for (j = 0; j <= num_blocks; j++) { | ||
1105 | ret = tegra_se_start_operation(se_dev, | ||
1106 | TEGRA_SE_RNG_DT_SIZE, false); | ||
1107 | |||
1108 | if (!ret) { | ||
1109 | rdata_addr = (rdata + (j * TEGRA_SE_RNG_DT_SIZE)); | ||
1110 | |||
1111 | if (data_len && num_blocks == j) { | ||
1112 | memcpy(rdata_addr, rng_ctx->rng_buf, data_len); | ||
1113 | } else { | ||
1114 | memcpy(rdata_addr, | ||
1115 | rng_ctx->rng_buf, TEGRA_SE_RNG_DT_SIZE); | ||
1116 | } | ||
1117 | |||
1118 | /* update DT vector */ | ||
1119 | for (i = TEGRA_SE_RNG_DT_SIZE - 1; i >= 0; i--) { | ||
1120 | dt_buf[i] += 1; | ||
1121 | if (dt_buf[i] != 0) | ||
1122 | break; | ||
1123 | } | ||
1124 | } else { | ||
1125 | dlen = 0; | ||
1126 | } | ||
1127 | if (rng_ctx->use_org_iv) { | ||
1128 | rng_ctx->use_org_iv = false; | ||
1129 | tegra_se_config_crypto(se_dev, | ||
1130 | SE_AES_OP_MODE_RNG_X931, true, | ||
1131 | rng_ctx->slot->slot_num, rng_ctx->use_org_iv); | ||
1132 | } | ||
1133 | } | ||
1134 | |||
1135 | pm_runtime_put(se_dev->dev); | ||
1136 | mutex_unlock(&se_hw_lock); | ||
1137 | |||
1138 | return dlen; | ||
1139 | } | ||
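
The DT update above treats the 16-byte vector as a big-endian counter, per the X9.31 construction; isolated, the step is (standalone sketch):

    /* Big-endian increment of the DT vector. */
    static void dt_increment(uint8_t *dt, int size)
    {
            int i;

            for (i = size - 1; i >= 0; i--)
                    if (++dt[i] != 0)
                            break;          /* stop once a byte didn't wrap */
    }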
1140 | |||
1141 | static int tegra_se_rng_reset(struct crypto_rng *tfm, u8 *seed, u32 slen) | ||
1142 | { | ||
1143 | struct tegra_se_rng_context *rng_ctx = crypto_rng_ctx(tfm); | ||
1144 | struct tegra_se_dev *se_dev = rng_ctx->se_dev; | ||
1145 | u8 *iv = seed; | ||
1146 | u8 *key = (u8 *)(seed + TEGRA_SE_RNG_IV_SIZE); | ||
1147 | u8 *dt = key + TEGRA_SE_RNG_KEY_SIZE; | ||
1148 | struct timespec ts; | ||
1149 | u64 nsec, tmp[2]; | ||
1150 | |||
1151 | BUG_ON(!seed); | ||
1152 | |||
1153 | /* take access to the hw */ | ||
1154 | mutex_lock(&se_hw_lock); | ||
1155 | pm_runtime_get_sync(se_dev->dev); | ||
1156 | |||
1157 | tegra_se_write_key_table(key, TEGRA_SE_RNG_KEY_SIZE, | ||
1158 | rng_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_KEY); | ||
1159 | |||
1160 | tegra_se_write_key_table(iv, TEGRA_SE_RNG_IV_SIZE, | ||
1161 | rng_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV); | ||
1162 | |||
1163 | pm_runtime_put(se_dev->dev); | ||
1164 | mutex_unlock(&se_hw_lock); | ||
1165 | |||
1166 | if (slen < TEGRA_SE_RNG_SEED_SIZE) { | ||
1167 | getnstimeofday(&ts); | ||
1168 | nsec = timespec_to_ns(&ts); | ||
1169 | do_div(nsec, 1000); | ||
1170 | nsec ^= se_dev->ctr << 56; | ||
1171 | se_dev->ctr++; | ||
1172 | tmp[0] = nsec; | ||
1173 | tmp[1] = tegra_chip_uid(); | ||
1174 | memcpy(rng_ctx->dt_buf, (u8 *)tmp, TEGRA_SE_RNG_DT_SIZE); | ||
1175 | } else { | ||
1176 | memcpy(rng_ctx->dt_buf, dt, TEGRA_SE_RNG_DT_SIZE); | ||
1177 | } | ||
1178 | |||
1179 | rng_ctx->use_org_iv = true; | ||
1180 | |||
1181 | return 0; | ||
1182 | } | ||
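
The reseed therefore expects the seed laid out as IV, then key, then (optionally) DT; the sizes come from tegra-se.h. Sketched as a comment:

    /* seed[0                .. IV_SIZE-1]           original IV
     * seed[IV_SIZE          .. IV_SIZE+KEY_SIZE-1]  AES key
     * seed[IV_SIZE+KEY_SIZE .. SEED_SIZE-1]         DT vector
     * If slen stops short of the DT part, a DT is synthesized from the
     * microsecond clock, a per-device counter, and the chip UID. */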
1183 | |||
1184 | int tegra_se_sha_init(struct ahash_request *req) | ||
1185 | { | ||
1186 | return 0; | ||
1187 | } | ||
1188 | |||
1189 | int tegra_se_sha_update(struct ahash_request *req) | ||
1190 | { | ||
1191 | return 0; | ||
1192 | } | ||
1193 | |||
1194 | int tegra_se_sha_finup(struct ahash_request *req) | ||
1195 | { | ||
1196 | return 0; | ||
1197 | } | ||
1198 | |||
1199 | int tegra_se_sha_final(struct ahash_request *req) | ||
1200 | { | ||
1201 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1202 | struct tegra_se_sha_context *sha_ctx = crypto_ahash_ctx(tfm); | ||
1203 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
1204 | struct scatterlist *src_sg; | ||
1205 | struct tegra_se_ll *src_ll; | ||
1206 | u32 total, num_sgs; | ||
1207 | int err = 0; | ||
1208 | |||
1209 | if (!req->nbytes) | ||
1210 | return -EINVAL; | ||
1211 | |||
1212 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | ||
1213 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA1; | ||
1214 | |||
1215 | if (crypto_ahash_digestsize(tfm) == SHA224_DIGEST_SIZE) | ||
1216 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA224; | ||
1217 | |||
1218 | if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE) | ||
1219 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA256; | ||
1220 | |||
1221 | if (crypto_ahash_digestsize(tfm) == SHA384_DIGEST_SIZE) | ||
1222 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA384; | ||
1223 | |||
1224 | if (crypto_ahash_digestsize(tfm) == SHA512_DIGEST_SIZE) | ||
1225 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA512; | ||
1226 | |||
1227 | /* take access to the hw */ | ||
1228 | mutex_lock(&se_hw_lock); | ||
1229 | pm_runtime_get_sync(se_dev->dev); | ||
1230 | |||
1231 | num_sgs = tegra_se_count_sgs(req->src, req->nbytes); | ||
1232 | if (num_sgs > SE_MAX_SRC_SG_COUNT) { | ||
1233 | dev_err(se_dev->dev, "too many SG buffers\n"); | ||
1234 | pm_runtime_put(se_dev->dev); | ||
1235 | mutex_unlock(&se_hw_lock); | ||
1236 | return -EINVAL; | ||
1237 | } | ||
1238 | *se_dev->src_ll_buf = num_sgs-1; | ||
1239 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1240 | src_sg = req->src; | ||
1241 | total = req->nbytes; | ||
1242 | |||
1243 | while (total) { | ||
1244 | err = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
1245 | if (!err) { | ||
1246 | dev_err(se_dev->dev, "dma_map_sg() error\n"); | ||
1247 | pm_runtime_put(se_dev->dev); | ||
1248 | mutex_unlock(&se_hw_lock); | ||
1249 | return -EINVAL; | ||
1250 | } | ||
1251 | src_ll->addr = sg_dma_address(src_sg); | ||
1252 | src_ll->data_len = src_sg->length; | ||
1253 | |||
1254 | total -= src_sg->length; | ||
1255 | src_sg = sg_next(src_sg); | ||
1256 | src_ll++; | ||
1257 | } | ||
1258 | |||
1259 | tegra_se_config_algo(se_dev, sha_ctx->op_mode, false, 0); | ||
1260 | tegra_se_config_sha(se_dev, req->nbytes); | ||
1261 | err = tegra_se_start_operation(se_dev, 0, false); | ||
1262 | if (!err) { | ||
1263 | tegra_se_read_hash_result(se_dev, req->result, | ||
1264 | crypto_ahash_digestsize(tfm), true); | ||
1265 | if ((sha_ctx->op_mode == SE_AES_OP_MODE_SHA384) || | ||
1266 | (sha_ctx->op_mode == SE_AES_OP_MODE_SHA512)) { | ||
1267 | u32 *result = (u32 *)req->result; | ||
1268 | u32 temp, i; | ||
1269 | |||
1270 | for (i = 0; i < crypto_ahash_digestsize(tfm)/4; | ||
1271 | i += 2) { | ||
1272 | temp = result[i]; | ||
1273 | result[i] = result[i+1]; | ||
1274 | result[i+1] = temp; | ||
1275 | } | ||
1276 | } | ||
1277 | } | ||
1278 | |||
1279 | src_sg = req->src; | ||
1280 | total = req->nbytes; | ||
1281 | while (total) { | ||
1282 | dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
1283 | total -= src_sg->length; | ||
1284 | src_sg = sg_next(src_sg); | ||
1285 | } | ||
1286 | pm_runtime_put(se_dev->dev); | ||
1287 | mutex_unlock(&se_hw_lock); | ||
1288 | |||
1289 | return err; | ||
1290 | } | ||
1291 | |||
1292 | static int tegra_se_sha_digest(struct ahash_request *req) | ||
1293 | { | ||
1294 | return tegra_se_sha_init(req) ?: tegra_se_sha_final(req); | ||
1295 | } | ||
1296 | |||
1297 | int tegra_se_sha_cra_init(struct crypto_tfm *tfm) | ||
1298 | { | ||
1299 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1300 | sizeof(struct tegra_se_sha_context)); | ||
1301 | return 0; | ||
1302 | } | ||
1303 | |||
1304 | void tegra_se_sha_cra_exit(struct crypto_tfm *tfm) | ||
1305 | { | ||
1306 | /* do nothing */ | ||
1307 | } | ||
1308 | |||
1309 | int tegra_se_aes_cmac_init(struct ahash_request *req) | ||
1310 | { | ||
1311 | |||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | int tegra_se_aes_cmac_update(struct ahash_request *req) | ||
1316 | { | ||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | int tegra_se_aes_cmac_final(struct ahash_request *req) | ||
1321 | { | ||
1322 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1323 | struct tegra_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(tfm); | ||
1324 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
1325 | struct scatterlist *src_sg; | ||
1326 | struct tegra_se_ll *src_ll; | ||
1327 | struct sg_mapping_iter miter; | ||
1328 | u32 num_sgs, blocks_to_process, last_block_bytes = 0, bytes_to_copy = 0; | ||
1329 | u8 piv[TEGRA_SE_AES_IV_SIZE]; | ||
1330 | int total, ret = 0, i = 0, mapped_sg_count = 0; | ||
1331 | bool padding_needed = false; | ||
1332 | unsigned long flags; | ||
1333 | unsigned int sg_flags = SG_MITER_ATOMIC; | ||
1334 | u8 *temp_buffer = NULL; | ||
1335 | bool use_orig_iv = true; | ||
1336 | |||
1337 | /* take access to the hw */ | ||
1338 | mutex_lock(&se_hw_lock); | ||
1339 | pm_runtime_get_sync(se_dev->dev); | ||
1340 | |||
1341 | blocks_to_process = req->nbytes / TEGRA_SE_AES_BLOCK_SIZE; | ||
1342 | /* num of bytes less than block size */ | ||
1343 | if ((req->nbytes % TEGRA_SE_AES_BLOCK_SIZE) || !blocks_to_process) { | ||
1344 | padding_needed = true; | ||
1345 | last_block_bytes = req->nbytes % TEGRA_SE_AES_BLOCK_SIZE; | ||
1346 | } else { | ||
1347 | /* decrement num of blocks */ | ||
1348 | blocks_to_process--; | ||
1349 | if (blocks_to_process) { | ||
1350 | /* preceding blocks remain; compute the last | ||
1351 | block's byte count */ | ||
1352 | last_block_bytes = req->nbytes - | ||
1353 | (blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE); | ||
1354 | } else { | ||
1355 | /* this is the last block and equal to block size */ | ||
1356 | last_block_bytes = req->nbytes; | ||
1357 | } | ||
1358 | } | ||
1359 | |||
1360 | /* first process all blocks except last block */ | ||
1361 | if (blocks_to_process) { | ||
1362 | num_sgs = tegra_se_count_sgs(req->src, req->nbytes); | ||
1363 | if (num_sgs > SE_MAX_SRC_SG_COUNT) { | ||
1364 | dev_err(se_dev->dev, "too many SG buffers\n"); | ||
1365 | goto out; | ||
1366 | } | ||
1367 | *se_dev->src_ll_buf = num_sgs - 1; | ||
1368 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1369 | src_sg = req->src; | ||
1370 | total = blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE; | ||
1371 | while (total > 0) { | ||
1372 | ret = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
1373 | mapped_sg_count++; | ||
1374 | if (!ret) { | ||
1375 | dev_err(se_dev->dev, "dma_map_sg() error\n"); | ||
1376 | goto out; | ||
1377 | } | ||
1378 | src_ll->addr = sg_dma_address(src_sg); | ||
1379 | if (total > src_sg->length) | ||
1380 | src_ll->data_len = src_sg->length; | ||
1381 | else | ||
1382 | src_ll->data_len = total; | ||
1383 | |||
1384 | total -= src_sg->length; | ||
1385 | if (total > 0) { | ||
1386 | src_sg = sg_next(src_sg); | ||
1387 | src_ll++; | ||
1388 | } | ||
1389 | WARN_ON(((total != 0) && (!src_sg))); | ||
1390 | } | ||
1391 | tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CMAC, true, | ||
1392 | cmac_ctx->keylen); | ||
1393 | /* write zero IV */ | ||
1394 | memset(piv, 0, TEGRA_SE_AES_IV_SIZE); | ||
1395 | tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE, | ||
1396 | cmac_ctx->slot->slot_num, | ||
1397 | SE_KEY_TABLE_TYPE_ORGIV); | ||
1398 | tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CMAC, true, | ||
1399 | cmac_ctx->slot->slot_num, true); | ||
1400 | tegra_se_start_operation(se_dev, | ||
1401 | blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE, false); | ||
1402 | src_sg = req->src; | ||
1403 | while (mapped_sg_count--) { | ||
1404 | dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
1405 | src_sg = sg_next(src_sg); | ||
1406 | } | ||
1407 | use_orig_iv = false; | ||
1408 | } | ||
1409 | |||
1410 | /* get the last block bytes from the sg_dma buffer using miter */ | ||
1411 | src_sg = req->src; | ||
1412 | num_sgs = tegra_se_count_sgs(req->src, req->nbytes); | ||
1413 | sg_flags |= SG_MITER_FROM_SG; | ||
1414 | cmac_ctx->buffer = dma_alloc_coherent(se_dev->dev, | ||
1415 | TEGRA_SE_AES_BLOCK_SIZE, | ||
1416 | &cmac_ctx->dma_addr, GFP_KERNEL); | ||
1417 | if (!cmac_ctx->buffer) | ||
1418 | goto out; | ||
1419 | | ||
1420 | /* allocate before disabling interrupts: GFP_KERNEL may sleep */ | ||
1421 | sg_miter_start(&miter, req->src, num_sgs, sg_flags); | ||
1422 | local_irq_save(flags); | ||
1423 | total = 0; | ||
1424 | temp_buffer = cmac_ctx->buffer; | ||
1425 | while (sg_miter_next(&miter) && total < req->nbytes) { | ||
1426 | unsigned int len; | ||
1427 | len = min(miter.length, req->nbytes - total); | ||
1428 | if ((req->nbytes - (total + len)) <= last_block_bytes) { | ||
1429 | bytes_to_copy = | ||
1430 | last_block_bytes - | ||
1431 | (req->nbytes - (total + len)); | ||
1432 | memcpy(temp_buffer, miter.addr + (len - bytes_to_copy), | ||
1433 | bytes_to_copy); | ||
1434 | last_block_bytes -= bytes_to_copy; | ||
1435 | temp_buffer += bytes_to_copy; | ||
1436 | } | ||
1437 | total += len; | ||
1438 | } | ||
1439 | sg_miter_stop(&miter); | ||
1440 | local_irq_restore(flags); | ||
1441 | |||
1442 | /* process last block */ | ||
1443 | if (padding_needed) { | ||
1444 | /* pad with 0x80, 0, 0 ... */ | ||
1445 | last_block_bytes = req->nbytes % TEGRA_SE_AES_BLOCK_SIZE; | ||
1446 | cmac_ctx->buffer[last_block_bytes] = 0x80; | ||
1447 | for (i = last_block_bytes+1; i < TEGRA_SE_AES_BLOCK_SIZE; i++) | ||
1448 | cmac_ctx->buffer[i] = 0; | ||
1449 | /* XOR with K2 */ | ||
1450 | for (i = 0; i < TEGRA_SE_AES_BLOCK_SIZE; i++) | ||
1451 | cmac_ctx->buffer[i] ^= cmac_ctx->K2[i]; | ||
1452 | } else { | ||
1453 | /* XOR with K1 */ | ||
1454 | for (i = 0; i < TEGRA_SE_AES_BLOCK_SIZE; i++) | ||
1455 | cmac_ctx->buffer[i] ^= cmac_ctx->K1[i]; | ||
1456 | } | ||
1457 | *se_dev->src_ll_buf = 0; | ||
1458 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1459 | src_ll->addr = cmac_ctx->dma_addr; | ||
1460 | src_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE; | ||
1461 | |||
1462 | if (use_orig_iv) { | ||
1463 | /* use zero IV; this is the case when the total | ||
1464 | length is <= one block */ | ||
1465 | memset(piv, 0, TEGRA_SE_AES_IV_SIZE); | ||
1466 | tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE, | ||
1467 | cmac_ctx->slot->slot_num, | ||
1468 | SE_KEY_TABLE_TYPE_ORGIV); | ||
1469 | } | ||
1470 | |||
1471 | tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CMAC, true, | ||
1472 | cmac_ctx->keylen); | ||
1473 | tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CMAC, true, | ||
1474 | cmac_ctx->slot->slot_num, use_orig_iv); | ||
1475 | tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE, false); | ||
1476 | tegra_se_read_hash_result(se_dev, req->result, | ||
1477 | TEGRA_SE_AES_CMAC_DIGEST_SIZE, false); | ||
1478 | |||
1479 | out: | ||
1480 | pm_runtime_put(se_dev->dev); | ||
1481 | mutex_unlock(&se_hw_lock); | ||
1482 | |||
1483 | if (cmac_ctx->buffer) | ||
1484 | dma_free_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, | ||
1485 | cmac_ctx->buffer, cmac_ctx->dma_addr); | ||
1486 | |||
1487 | return 0; | ||
1488 | } | ||
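
Worked example of the padding branch: a 10-byte message becomes one block M || 0x80 || 0...0 and is XORed with K2; a full final block skips the pad and is XORed with K1 instead, as RFC 4493 prescribes. Sketch:

    uint8_t block[16] = { 0 };      /* 10 message bytes assumed copied in */
    unsigned int i;

    block[10] = 0x80;               /* delimiter bit right after the message */
    for (i = 0; i < 16; i++)        /* block[11..15] remain zero */
            block[i] ^= K2[i];      /* K2 because padding was applied */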
1489 | |||
int tegra_se_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
	unsigned int keylen)
{
	struct tegra_se_aes_cmac_context *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se_dev *se_dev = sg_tegra_se_dev;
	struct tegra_se_ll *src_ll, *dst_ll;
	struct tegra_se_slot *pslot;
	u8 piv[TEGRA_SE_AES_IV_SIZE];
	u32 *pbuf;
	dma_addr_t pbuf_adr;
	int ret = 0;
	const u8 rb = 0x87;
	u8 msb;

	if (!ctx) {
		dev_err(se_dev->dev, "invalid context\n");
		return -EINVAL;
	}

	if ((keylen != TEGRA_SE_KEY_128_SIZE) &&
	    (keylen != TEGRA_SE_KEY_192_SIZE) &&
	    (keylen != TEGRA_SE_KEY_256_SIZE)) {
		dev_err(se_dev->dev, "invalid key size\n");
		return -EINVAL;
	}

	if (key) {
		if (!ctx->slot ||
		    ctx->slot->slot_num == ssk_slot.slot_num) {
			pslot = tegra_se_alloc_key_slot();
			if (!pslot) {
				dev_err(se_dev->dev, "no free key slot\n");
				return -ENOMEM;
			}
			ctx->slot = pslot;
		}
		ctx->keylen = keylen;
	} else {
		/* fall back to the pre-loaded secure storage key (SSK) */
		tegra_se_free_key_slot(ctx->slot);
		ctx->slot = &ssk_slot;
		ctx->keylen = AES_KEYSIZE_128;
	}

	pbuf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE,
				  &pbuf_adr, GFP_KERNEL);
	if (!pbuf) {
		dev_err(se_dev->dev, "cannot allocate DMA buffer\n");
		return -ENOMEM;
	}
	memset(pbuf, 0, TEGRA_SE_AES_BLOCK_SIZE);

	/* take access to the hw */
	mutex_lock(&se_hw_lock);
	pm_runtime_get_sync(se_dev->dev);

	*se_dev->src_ll_buf = 0;
	*se_dev->dst_ll_buf = 0;
	src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
	dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);

	src_ll->addr = pbuf_adr;
	src_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE;
	dst_ll->addr = pbuf_adr;
	dst_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE;

	/* load the key */
	tegra_se_write_key_table((u8 *)key, keylen,
				 ctx->slot->slot_num, SE_KEY_TABLE_TYPE_KEY);

	/* write zero IV */
	memset(piv, 0, TEGRA_SE_AES_IV_SIZE);

	/* load IV */
	tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE,
				 ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV);

	/* config crypto algo */
	tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CBC, true, keylen);

	tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CBC, true,
			       ctx->slot->slot_num, true);

	ret = tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE, false);
	if (ret) {
		dev_err(se_dev->dev, "%s: start op failed\n", __func__);
		goto out;
	}

	/* compute K1 subkey */
	memcpy(ctx->K1, pbuf, TEGRA_SE_AES_BLOCK_SIZE);
	tegra_se_leftshift_onebit(ctx->K1, TEGRA_SE_AES_BLOCK_SIZE, &msb);
	if (msb)
		ctx->K1[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb;

	/* compute K2 subkey */
	memcpy(ctx->K2, ctx->K1, TEGRA_SE_AES_BLOCK_SIZE);
	tegra_se_leftshift_onebit(ctx->K2, TEGRA_SE_AES_BLOCK_SIZE, &msb);
	if (msb)
		ctx->K2[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb;

out:
	pm_runtime_put(se_dev->dev);
	mutex_unlock(&se_hw_lock);

	if (pbuf)
		dma_free_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE,
				  pbuf, pbuf_adr);

	return ret;
}

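/* One-shot CMAC digest: run init and final back to back on the request. */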
int tegra_se_aes_cmac_digest(struct ahash_request *req)
{
	return tegra_se_aes_cmac_init(req) ?: tegra_se_aes_cmac_final(req);
}

int tegra_se_aes_cmac_finup(struct ahash_request *req)
{
	return 0;
}

int tegra_se_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct tegra_se_aes_cmac_context));

	return 0;
}

void tegra_se_aes_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_se_aes_cmac_context *ctx = crypto_tfm_ctx(tfm);

	tegra_se_free_key_slot(ctx->slot);
	ctx->slot = NULL;
}

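/*
 * Block cipher and PRNG descriptors registered with the crypto API; all
 * of them are backed by the Security Engine hardware.
 */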
static struct crypto_alg aes_algs[] = {
	{
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct tegra_se_aes_context),
		.cra_alignmask = 0,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = tegra_se_aes_cra_init,
		.cra_exit = tegra_se_aes_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
			.max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
			.ivsize = TEGRA_SE_AES_IV_SIZE,
			.setkey = tegra_se_aes_setkey,
			.encrypt = tegra_se_aes_cbc_encrypt,
			.decrypt = tegra_se_aes_cbc_decrypt,
		}
	}, {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct tegra_se_aes_context),
		.cra_alignmask = 0,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = tegra_se_aes_cra_init,
		.cra_exit = tegra_se_aes_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
			.max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
			.ivsize = TEGRA_SE_AES_IV_SIZE,
			.setkey = tegra_se_aes_setkey,
			.encrypt = tegra_se_aes_ecb_encrypt,
			.decrypt = tegra_se_aes_ecb_decrypt,
		}
	}, {
		.cra_name = "ctr(aes)",
		.cra_driver_name = "ctr-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct tegra_se_aes_context),
		.cra_alignmask = 0,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = tegra_se_aes_cra_init,
		.cra_exit = tegra_se_aes_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
			.max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
			.ivsize = TEGRA_SE_AES_IV_SIZE,
			.setkey = tegra_se_aes_setkey,
			.encrypt = tegra_se_aes_ctr_encrypt,
			.decrypt = tegra_se_aes_ctr_decrypt,
			.geniv = "eseqiv",
		}
	}, {
		.cra_name = "ofb(aes)",
		.cra_driver_name = "ofb-aes-tegra",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct tegra_se_aes_context),
		.cra_alignmask = 0,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = tegra_se_aes_cra_init,
		.cra_exit = tegra_se_aes_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE,
			.max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE,
			.ivsize = TEGRA_SE_AES_IV_SIZE,
			.setkey = tegra_se_aes_setkey,
			.encrypt = tegra_se_aes_ofb_encrypt,
			.decrypt = tegra_se_aes_ofb_decrypt,
			.geniv = "eseqiv",
		}
	}, {
		.cra_name = "ansi_cprng",
		.cra_driver_name = "rng-aes-tegra",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_RNG,
		.cra_ctxsize = sizeof(struct tegra_se_rng_context),
		.cra_type = &crypto_rng_type,
		.cra_module = THIS_MODULE,
		.cra_init = tegra_se_rng_init,
		.cra_exit = tegra_se_rng_exit,
		.cra_u = {
			.rng = {
				.rng_make_random = tegra_se_rng_get_random,
				.rng_reset = tegra_se_rng_reset,
				.seedsize = TEGRA_SE_RNG_SEED_SIZE,
			}
		}
	}
};

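/*
 * ahash descriptors: hardware-backed CMAC(AES) plus the
 * SHA-1/224/256/384/512 family.
 */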
static struct ahash_alg hash_algs[] = {
	{
		.init = tegra_se_aes_cmac_init,
		.update = tegra_se_aes_cmac_update,
		.final = tegra_se_aes_cmac_final,
		.finup = tegra_se_aes_cmac_finup,
		.digest = tegra_se_aes_cmac_digest,
		.setkey = tegra_se_aes_cmac_setkey,
		.halg.digestsize = TEGRA_SE_AES_CMAC_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "cmac(aes)",
			.cra_driver_name = "tegra-se-cmac(aes)",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH,
			.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct tegra_se_aes_cmac_context),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = tegra_se_aes_cmac_cra_init,
			.cra_exit = tegra_se_aes_cmac_cra_exit,
		}
	}, {
		.init = tegra_se_sha_init,
		.update = tegra_se_sha_update,
		.final = tegra_se_sha_final,
		.finup = tegra_se_sha_finup,
		.digest = tegra_se_sha_digest,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "tegra-se-sha1",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct tegra_se_sha_context),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = tegra_se_sha_cra_init,
			.cra_exit = tegra_se_sha_cra_exit,
		}
	}, {
		.init = tegra_se_sha_init,
		.update = tegra_se_sha_update,
		.final = tegra_se_sha_final,
		.finup = tegra_se_sha_finup,
		.digest = tegra_se_sha_digest,
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha224",
			.cra_driver_name = "tegra-se-sha224",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH,
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct tegra_se_sha_context),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = tegra_se_sha_cra_init,
			.cra_exit = tegra_se_sha_cra_exit,
		}
	}, {
		.init = tegra_se_sha_init,
		.update = tegra_se_sha_update,
		.final = tegra_se_sha_final,
		.finup = tegra_se_sha_finup,
		.digest = tegra_se_sha_digest,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "tegra-se-sha256",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct tegra_se_sha_context),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = tegra_se_sha_cra_init,
			.cra_exit = tegra_se_sha_cra_exit,
		}
	}, {
		.init = tegra_se_sha_init,
		.update = tegra_se_sha_update,
		.final = tegra_se_sha_final,
		.finup = tegra_se_sha_finup,
		.digest = tegra_se_sha_digest,
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha384",
			.cra_driver_name = "tegra-se-sha384",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH,
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct tegra_se_sha_context),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = tegra_se_sha_cra_init,
			.cra_exit = tegra_se_sha_cra_exit,
		}
	}, {
		.init = tegra_se_sha_init,
		.update = tegra_se_sha_update,
		.final = tegra_se_sha_final,
		.finup = tegra_se_sha_finup,
		.digest = tegra_se_sha_digest,
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha512",
			.cra_driver_name = "tegra-se-sha512",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH,
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct tegra_se_sha_context),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = tegra_se_sha_cra_init,
			.cra_exit = tegra_se_sha_cra_exit,
		}
	}
};

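/*
 * Bind the SE hardware: map MMIO, hook up the interrupt and the "se"
 * clock, then register every algorithm above with the crypto API. After
 * a successful probe the standard kernel crypto interfaces apply, e.g.
 * (illustrative sketch only, not part of this driver):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
 *	if (!IS_ERR(tfm))
 *		crypto_ahash_setkey(tfm, key, TEGRA_SE_KEY_128_SIZE);
 */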
static int tegra_se_probe(struct platform_device *pdev)
{
	struct tegra_se_dev *se_dev = NULL;
	struct resource *res = NULL;
	int err = 0, i = 0, j = 0, k = 0;

	se_dev = kzalloc(sizeof(struct tegra_se_dev), GFP_KERNEL);
	if (!se_dev) {
		dev_err(&pdev->dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	spin_lock_init(&se_dev->lock);
	crypto_init_queue(&se_dev->queue, TEGRA_SE_CRYPTO_QUEUE_LENGTH);
	platform_set_drvdata(pdev, se_dev);
	se_dev->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENXIO;
		dev_err(se_dev->dev, "platform_get_resource failed\n");
		goto fail;
	}

	se_dev->io_reg = ioremap(res->start, resource_size(res));
	if (!se_dev->io_reg) {
		err = -ENOMEM;
		dev_err(se_dev->dev, "ioremap failed\n");
		goto fail;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		err = -ENXIO;
		dev_err(se_dev->dev, "platform_get_resource failed\n");
		goto err_pmc;
	}

	se_dev->pmc_io_reg = ioremap(res->start, resource_size(res));
	if (!se_dev->pmc_io_reg) {
		err = -ENOMEM;
		dev_err(se_dev->dev, "pmc ioremap failed\n");
		goto err_pmc;
	}

	/* platform_get_irq() returns a negative errno on failure */
	se_dev->irq = platform_get_irq(pdev, 0);
	if (se_dev->irq <= 0) {
		err = -ENODEV;
		dev_err(se_dev->dev, "platform_get_irq failed\n");
		goto err_irq;
	}

	err = request_irq(se_dev->irq, tegra_se_irq, IRQF_DISABLED,
			  DRIVER_NAME, se_dev);
	if (err) {
		dev_err(se_dev->dev, "request_irq failed - irq[%d] err[%d]\n",
			se_dev->irq, err);
		goto err_irq;
	}

	/* Initialize the clock */
	se_dev->pclk = clk_get(se_dev->dev, "se");
	if (IS_ERR(se_dev->pclk)) {
		dev_err(se_dev->dev, "clock initialization failed (%ld)\n",
			PTR_ERR(se_dev->pclk));
		err = -ENODEV;
		goto clean;
	}

	err = clk_set_rate(se_dev->pclk, ULONG_MAX);
	if (err) {
		dev_err(se_dev->dev, "clock set_rate failed\n");
		goto clean;
	}

	err = tegra_init_key_slot(se_dev);
	if (err) {
		dev_err(se_dev->dev, "init_key_slot failed\n");
		goto clean;
	}

	init_completion(&se_dev->complete);
	se_work_q = alloc_workqueue("se_work_q", WQ_HIGHPRI | WQ_UNBOUND, 16);
	if (!se_work_q) {
		dev_err(se_dev->dev, "alloc_workqueue failed\n");
		err = -ENOMEM;
		goto clean;
	}

	sg_tegra_se_dev = se_dev;
	pm_runtime_enable(se_dev->dev);
	tegra_se_key_read_disable_all();

	err = tegra_se_alloc_ll_buf(se_dev, SE_MAX_SRC_SG_COUNT,
				    SE_MAX_DST_SG_COUNT);
	if (err) {
		dev_err(se_dev->dev, "cannot allocate LL DMA buffer\n");
		goto clean;
	}

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err) {
			dev_err(se_dev->dev,
				"crypto_register_alg failed index[%d]\n", i);
			goto clean;
		}
	}

	for (j = 0; j < ARRAY_SIZE(hash_algs); j++) {
		err = crypto_register_ahash(&hash_algs[j]);
		if (err) {
			dev_err(se_dev->dev,
				"crypto_register_ahash failed index[%d]\n", j);
			goto clean;
		}
	}

#if defined(CONFIG_PM)
	se_dev->ctx_save_buf = dma_alloc_coherent(se_dev->dev,
			SE_CONTEXT_BUFER_SIZE, &se_dev->ctx_save_buf_adr,
			GFP_KERNEL);
	if (!se_dev->ctx_save_buf) {
		dev_err(se_dev->dev, "context save buffer alloc failed\n");
		err = -ENOMEM;
		goto clean;
	}
#endif

	dev_info(se_dev->dev, "%s: complete\n", __func__);
	return 0;

clean:
	pm_runtime_disable(se_dev->dev);
	for (k = 0; k < i; k++)
		crypto_unregister_alg(&aes_algs[k]);

	for (k = 0; k < j; k++)
		crypto_unregister_ahash(&hash_algs[k]);

	tegra_se_free_ll_buf(se_dev);

	if (se_work_q)
		destroy_workqueue(se_work_q);

	if (!IS_ERR_OR_NULL(se_dev->pclk))
		clk_put(se_dev->pclk);

	free_irq(se_dev->irq, se_dev);

err_irq:
	iounmap(se_dev->pmc_io_reg);
err_pmc:
	iounmap(se_dev->io_reg);

fail:
	platform_set_drvdata(pdev, NULL);
	kfree(se_dev);
	sg_tegra_se_dev = NULL;

	return err;
}

static int __devexit tegra_se_remove(struct platform_device *pdev)
{
	struct tegra_se_dev *se_dev = platform_get_drvdata(pdev);
	int i;

	if (!se_dev)
		return -ENODEV;

	pm_runtime_disable(se_dev->dev);

	cancel_work_sync(&se_work);
	if (se_work_q)
		destroy_workqueue(se_work_q);
	free_irq(se_dev->irq, se_dev);
	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i]);
	if (se_dev->pclk)
		clk_put(se_dev->pclk);
	tegra_se_free_ll_buf(se_dev);
	if (se_dev->ctx_save_buf) {
		dma_free_coherent(se_dev->dev, SE_CONTEXT_BUFER_SIZE,
				  se_dev->ctx_save_buf,
				  se_dev->ctx_save_buf_adr);
		se_dev->ctx_save_buf = NULL;
	}
	iounmap(se_dev->io_reg);
	iounmap(se_dev->pmc_io_reg);
	kfree(se_dev);
	sg_tegra_se_dev = NULL;

	return 0;
}

#if defined(CONFIG_PM)
static int tegra_se_resume(struct device *dev)
{
	return 0;
}

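/*
 * Generate a random 128-bit key straight into the SRK key slot: RNG
 * (LFSR) output is routed to the key table (DST_KEYTAB) rather than to
 * memory, so the key value never leaves the engine.
 */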
static int tegra_se_generate_rng_key(struct tegra_se_dev *se_dev)
{
	int ret = 0;
	u32 val = 0;

	*se_dev->src_ll_buf = 0;
	*se_dev->dst_ll_buf = 0;

	/* Configure algorithm */
	val = SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_ENC_MODE(MODE_KEY128) |
		SE_CONFIG_DST(DST_KEYTAB);
	se_writel(se_dev, val, SE_CONFIG_REG_OFFSET);

	/* Configure destination key index number */
	val = SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(srk_slot.slot_num) |
		SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(KEYS_0_3);
	se_writel(se_dev, val, SE_CRYPTO_KEYTABLE_DST_REG_OFFSET);

	/* Configure crypto */
	val = SE_CRYPTO_INPUT_SEL(INPUT_LFSR) | SE_CRYPTO_XOR_POS(XOR_BYPASS) |
		SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
		SE_CRYPTO_HASH(HASH_DISABLE) |
		SE_CRYPTO_KEY_INDEX(ssk_slot.slot_num) |
		SE_CRYPTO_IV_SEL(IV_ORIGINAL);
	se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);

	ret = tegra_se_start_operation(se_dev, TEGRA_SE_KEY_128_SIZE, false);

	return ret;
}

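/*
 * Derive the Save/Restore Key (SRK): key the RNG first, then run it
 * again with the output routed to the SE-internal SRK register
 * (DST_SRK), keeping the key out of system memory.
 */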
static int tegra_se_generate_srk(struct tegra_se_dev *se_dev)
{
	int ret = 0;
	u32 val = 0;

	mutex_lock(&se_hw_lock);
	pm_runtime_get_sync(se_dev->dev);

	ret = tegra_se_generate_rng_key(se_dev);
	if (ret) {
		pm_runtime_put(se_dev->dev);
		mutex_unlock(&se_hw_lock);
		return ret;
	}

	*se_dev->src_ll_buf = 0;
	*se_dev->dst_ll_buf = 0;

	val = SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_ENC_MODE(MODE_KEY128) |
		SE_CONFIG_DEC_ALG(ALG_NOP) | SE_CONFIG_DST(DST_SRK);

	se_writel(se_dev, val, SE_CONFIG_REG_OFFSET);

	val = SE_CRYPTO_XOR_POS(XOR_BYPASS) |
		SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
		SE_CRYPTO_HASH(HASH_DISABLE) |
		SE_CRYPTO_KEY_INDEX(srk_slot.slot_num) |
		SE_CRYPTO_IV_SEL(IV_UPDATED);

	se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
	ret = tegra_se_start_operation(se_dev, TEGRA_SE_KEY_128_SIZE, false);

	pm_runtime_put(se_dev->dev);
	mutex_unlock(&se_hw_lock);

	return ret;
}

static int tegra_se_lp_generate_random_data(struct tegra_se_dev *se_dev)
{
	struct tegra_se_ll *src_ll, *dst_ll;
	int ret = 0;
	u32 val;

	mutex_lock(&se_hw_lock);
	pm_runtime_get_sync(se_dev->dev);

	*se_dev->src_ll_buf = 0;
	*se_dev->dst_ll_buf = 0;
	src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
	dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
	src_ll->addr = se_dev->ctx_save_buf_adr;
	src_ll->data_len = SE_CONTEXT_SAVE_RANDOM_DATA_SIZE;
	dst_ll->addr = se_dev->ctx_save_buf_adr;
	dst_ll->data_len = SE_CONTEXT_SAVE_RANDOM_DATA_SIZE;

	tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_X931, true,
			     TEGRA_SE_KEY_128_SIZE);

	/* Configure crypto */
	val = SE_CRYPTO_INPUT_SEL(INPUT_LFSR) | SE_CRYPTO_XOR_POS(XOR_BYPASS) |
		SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
		SE_CRYPTO_HASH(HASH_DISABLE) |
		SE_CRYPTO_KEY_INDEX(srk_slot.slot_num) |
		SE_CRYPTO_IV_SEL(IV_ORIGINAL);

	se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET);
	ret = tegra_se_start_operation(se_dev,
				       SE_CONTEXT_SAVE_RANDOM_DATA_SIZE, false);

	pm_runtime_put(se_dev->dev);
	mutex_unlock(&se_hw_lock);

	return ret;
}

static int tegra_se_lp_encrypt_context_data(struct tegra_se_dev *se_dev,
	u32 context_offset, u32 data_size)
{
	struct tegra_se_ll *src_ll, *dst_ll;
	int ret = 0;

	mutex_lock(&se_hw_lock);
	pm_runtime_get_sync(se_dev->dev);

	*se_dev->src_ll_buf = 0;
	*se_dev->dst_ll_buf = 0;
	src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1);
	dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
	src_ll->addr = se_dev->ctx_save_buf_adr + context_offset;
	src_ll->data_len = data_size;
	dst_ll->addr = se_dev->ctx_save_buf_adr + context_offset;
	dst_ll->data_len = data_size;

	se_writel(se_dev, SE_CONTEXT_SAVE_SRC(MEM),
		  SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);

	ret = tegra_se_start_operation(se_dev, data_size, true);

	pm_runtime_put(se_dev->dev);
	mutex_unlock(&se_hw_lock);

	return ret;
}

static int tegra_se_lp_sticky_bits_context_save(struct tegra_se_dev *se_dev)
{
	struct tegra_se_ll *dst_ll;
	int ret = 0;

	mutex_lock(&se_hw_lock);
	pm_runtime_get_sync(se_dev->dev);

	*se_dev->src_ll_buf = 0;
	*se_dev->dst_ll_buf = 0;
	dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
	dst_ll->addr = (se_dev->ctx_save_buf_adr +
			SE_CONTEXT_SAVE_STICKY_BITS_OFFSET);
	dst_ll->data_len = SE_CONTEXT_SAVE_STICKY_BITS_SIZE;

	se_writel(se_dev, SE_CONTEXT_SAVE_SRC(STICKY_BITS),
		  SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);

	ret = tegra_se_start_operation(se_dev,
				       SE_CONTEXT_SAVE_STICKY_BITS_SIZE, true);

	pm_runtime_put(se_dev->dev);
	mutex_unlock(&se_hw_lock);

	return ret;
}

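/*
 * Save every hardware key slot into the context buffer; each slot is
 * read out as two 128-bit word-quads, i.e. 256 bits per slot.
 */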
static int tegra_se_lp_keytable_context_save(struct tegra_se_dev *se_dev)
{
	struct tegra_se_ll *dst_ll;
	int ret = 0, i, j;
	u32 val = 0;

	/* take access to the hw */
	mutex_lock(&se_hw_lock);
	pm_runtime_get_sync(se_dev->dev);

	*se_dev->dst_ll_buf = 0;
	dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
	dst_ll->addr = (se_dev->ctx_save_buf_adr + SE_CONTEXT_SAVE_KEYS_OFFSET);
	dst_ll->data_len = TEGRA_SE_KEY_128_SIZE;

	/* bail out of both loops on the first error */
	for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT && !ret; i++) {
		for (j = 0; j < 2; j++) {
			val = SE_CONTEXT_SAVE_SRC(KEYTABLE) |
				SE_CONTEXT_SAVE_KEY_INDEX(i) |
				SE_CONTEXT_SAVE_WORD_QUAD(j);
			se_writel(se_dev,
				  val, SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
			ret = tegra_se_start_operation(se_dev,
						       TEGRA_SE_KEY_128_SIZE,
						       true);
			if (ret)
				break;
			dst_ll->addr += TEGRA_SE_KEY_128_SIZE;
		}
	}

	pm_runtime_put(se_dev->dev);
	mutex_unlock(&se_hw_lock);

	return ret;
}

static int tegra_se_lp_iv_context_save(struct tegra_se_dev *se_dev,
	bool org_iv, u32 context_offset)
{
	struct tegra_se_ll *dst_ll;
	int ret = 0, i;
	u32 val = 0;

	mutex_lock(&se_hw_lock);
	pm_runtime_get_sync(se_dev->dev);

	*se_dev->dst_ll_buf = 0;
	dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1);
	dst_ll->addr = (se_dev->ctx_save_buf_adr + context_offset);
	dst_ll->data_len = TEGRA_SE_AES_IV_SIZE;

	for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) {
		val = SE_CONTEXT_SAVE_SRC(KEYTABLE) |
			SE_CONTEXT_SAVE_KEY_INDEX(i) |
			(org_iv ? SE_CONTEXT_SAVE_WORD_QUAD(ORIG_IV) :
				SE_CONTEXT_SAVE_WORD_QUAD(UPD_IV));
		se_writel(se_dev, val, SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
		ret = tegra_se_start_operation(se_dev,
					       TEGRA_SE_AES_IV_SIZE, true);
		if (ret)
			break;
		dst_ll->addr += TEGRA_SE_AES_IV_SIZE;
	}

	pm_runtime_put(se_dev->dev);
	mutex_unlock(&se_hw_lock);

	return ret;
}

static int tegra_se_save_SRK(struct tegra_se_dev *se_dev)
{
	int ret = 0;

	mutex_lock(&se_hw_lock);
	pm_runtime_get_sync(se_dev->dev);

	se_writel(se_dev, SE_CONTEXT_SAVE_SRC(SRK),
		  SE_CONTEXT_SAVE_CONFIG_REG_OFFSET);
	ret = tegra_se_start_operation(se_dev, 0, true);

	pm_runtime_put(se_dev->dev);
	mutex_unlock(&se_hw_lock);

	return ret;
}

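/*
 * LP0 entry: assemble the encrypted context blob that the warm-boot
 * code restores on resume. The steps below fill the save buffer in the
 * order of the SE_CONTEXT_* offsets: random data, sticky bits, key
 * table, original and updated IVs, then a known pattern used to verify
 * decryption, with the buffer address handed off via PMC scratch.
 */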
static int tegra_se_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_se_dev *se_dev = platform_get_drvdata(pdev);
	int err = 0, i;
	unsigned char *dt_buf = NULL;
	u8 pdata[SE_CONTEXT_KNOWN_PATTERN_SIZE] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f};

	if (!se_dev)
		return -ENODEV;

	/* Generate SRK */
	err = tegra_se_generate_srk(se_dev);
	if (err) {
		dev_err(se_dev->dev, "LP SRK generation failed\n");
		goto out;
	}

	/* Generate random data */
	err = tegra_se_lp_generate_random_data(se_dev);
	if (err) {
		dev_err(se_dev->dev, "LP random pattern generation failed\n");
		goto out;
	}

	/* Encrypt random data */
	err = tegra_se_lp_encrypt_context_data(se_dev,
		SE_CONTEXT_SAVE_RANDOM_DATA_OFFSET,
		SE_CONTEXT_SAVE_RANDOM_DATA_SIZE);
	if (err) {
		dev_err(se_dev->dev, "LP random pattern encryption failed\n");
		goto out;
	}

	/* Sticky bits context save */
	err = tegra_se_lp_sticky_bits_context_save(se_dev);
	if (err) {
		dev_err(se_dev->dev, "LP sticky bits context save failed\n");
		goto out;
	}

	/* Key table context save */
	err = tegra_se_lp_keytable_context_save(se_dev);
	if (err) {
		dev_err(se_dev->dev, "LP key table save failed\n");
		goto out;
	}

	/* Original IV context save */
	err = tegra_se_lp_iv_context_save(se_dev,
		true, SE_CONTEXT_ORIGINAL_IV_OFFSET);
	if (err) {
		dev_err(se_dev->dev, "LP original IV save failed\n");
		goto out;
	}

	/* Updated IV context save */
	err = tegra_se_lp_iv_context_save(se_dev,
		false, SE_CONTEXT_UPDATED_IV_OFFSET);
	if (err) {
		dev_err(se_dev->dev, "LP updated IV save failed\n");
		goto out;
	}

	/* Encrypt known pattern */
	dt_buf = (unsigned char *)se_dev->ctx_save_buf;
	dt_buf += SE_CONTEXT_KNOWN_PATTERN_OFFSET;
	for (i = 0; i < SE_CONTEXT_KNOWN_PATTERN_SIZE; i++)
		dt_buf[i] = pdata[i];
	err = tegra_se_lp_encrypt_context_data(se_dev,
		SE_CONTEXT_KNOWN_PATTERN_OFFSET,
		SE_CONTEXT_KNOWN_PATTERN_SIZE);
	if (err) {
		dev_err(se_dev->dev, "LP known pattern save failed\n");
		goto out;
	}

	/* Write LP context buffer address into the PMC scratch register */
	writel(se_dev->ctx_save_buf_adr,
	       se_dev->pmc_io_reg + PMC_SCRATCH43_REG_OFFSET);

	/* Save the SRK into secure scratch */
	err = tegra_se_save_SRK(se_dev);
	if (err) {
		dev_err(se_dev->dev, "LP SRK save failed\n");
		goto out;
	}

out:
	return err;
}
#endif

#if defined(CONFIG_PM_RUNTIME)
static int tegra_se_runtime_suspend(struct device *dev)
{
	/*
	 * Do a dummy read to flush any posted writes still on the bus
	 * before the clock is disabled.
	 */
	se_readl(sg_tegra_se_dev, SE_CONFIG_REG_OFFSET);

	clk_disable(sg_tegra_se_dev->pclk);
	return 0;
}

static int tegra_se_runtime_resume(struct device *dev)
{
	clk_enable(sg_tegra_se_dev->pclk);
	return 0;
}

static const struct dev_pm_ops tegra_se_dev_pm_ops = {
	.runtime_suspend = tegra_se_runtime_suspend,
	.runtime_resume = tegra_se_runtime_resume,
#if defined(CONFIG_PM)
	.suspend = tegra_se_suspend,
	.resume = tegra_se_resume,
#endif
};
#endif

static struct platform_driver tegra_se_driver = {
	.probe = tegra_se_probe,
	.remove = __devexit_p(tegra_se_remove),
	.driver = {
		.name = "tegra-se",
		.owner = THIS_MODULE,
#if defined(CONFIG_PM_RUNTIME)
		.pm = &tegra_se_dev_pm_ops,
#endif
	},
};

static int __init tegra_se_module_init(void)
{
	return platform_driver_register(&tegra_se_driver);
}

static void __exit tegra_se_module_exit(void)
{
	platform_driver_unregister(&tegra_se_driver);
}

module_init(tegra_se_module_init);
module_exit(tegra_se_module_exit);

MODULE_DESCRIPTION("Tegra Crypto algorithm support");
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_LICENSE("GPL");
MODULE_ALIAS("tegra-se");