author		Marek Vasut <marex@denx.de>	2013-12-10 14:26:21 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2014-01-05 07:49:54 -0500
commit		15b59e7c3733f90ff1f7dd66ad77ae1c90bcdff5 (patch)
tree		f2d6df9e0f69b10fc3d86080320f689e05211650 /drivers/crypto
parent		c493c04403769d833864ad5be2ff64dee7567ca7 (diff)
crypto: mxs - Add Freescale MXS DCP driver
Add support for the MXS DCP block. The driver currently supports
SHA-1/SHA-256 hashing and AES-128 CBC/ECB modes. The non-standard
CRC32 is not yet supported.
Signed-off-by: Marek Vasut <marex@denx.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fabio Estevam <fabio.estevam@freescale.com>
Cc: Shawn Guo <shawn.guo@linaro.org>
Cc: devicetree@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
 -rw-r--r--  drivers/crypto/Kconfig   |   17
 -rw-r--r--  drivers/crypto/Makefile  |    1
 -rw-r--r--  drivers/crypto/mxs-dcp.c | 1100
 3 files changed, 1118 insertions, 0 deletions
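
As review context (not part of the patch): a minimal sketch of how a kernel consumer would reach the AES transform this driver registers, assuming the 3.13-era ablkcipher API. example_encrypt() and its parameters are hypothetical; the crypto core selects "cbc-aes-dcp" by its priority of 400.

static int example_encrypt(struct scatterlist *src, struct scatterlist *dst,
			   unsigned int nbytes, const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* A real caller would set a completion callback and treat the
	 * -EINPROGRESS below as "result arrives asynchronously". */
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
	ret = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
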
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9ef60dc84a79..13857f5d28f7 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -401,4 +401,21 @@ if CRYPTO_DEV_CCP
source "drivers/crypto/ccp/Kconfig"
endif

config CRYPTO_DEV_MXS_DCP
	tristate "Support for Freescale MXS DCP"
	depends on ARCH_MXS
	select CRYPTO_SHA1
	select CRYPTO_SHA256
	select CRYPTO_CBC
	select CRYPTO_ECB
	select CRYPTO_AES
	select CRYPTO_BLKCIPHER
	select CRYPTO_ALGAPI
	help
	  The Freescale i.MX23/i.MX28 has a SHA1/SHA256 and AES128 CBC/ECB
	  co-processor on the die.

	  To compile this driver as a module, choose M here: the module
	  will be called mxs-dcp.

endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 22447f77449d..94a8aa50e4c7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
new file mode 100644
index 000000000000..d41917c555d5
--- /dev/null
+++ b/drivers/crypto/mxs-dcp.c
@@ -0,0 +1,1100 @@
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
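
/*
 * Note: the eight 32-bit words above mirror the command words the DCP
 * fetches over DMA. next_cmd_addr would allow chaining descriptors, but
 * this driver always submits a single descriptor and leaves it zero.
 */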

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];
	uint8_t			sha_digest[SHA256_DIGEST_SIZE];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	struct mutex			mutex[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};
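
/*
 * Only two of the four hardware channels are used: channel 0 runs the
 * SHA worker thread and channel 2 the AES worker thread (see the
 * dcp_chan_thread_* functions below).
 */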

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	unsigned int			enc:1;
	unsigned int			ecb:1;
	struct crypto_ablkcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};
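
/*
 * init asks the worker to set MXS_DCP_CONTROL0_HASH_INIT on the next
 * descriptor (start of a new hash); fini asks for
 * MXS_DCP_CONTROL0_HASH_TERM plus write-out of the final digest.
 */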

/*
 * There can only ever be one instance of the MXS DCP, due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;
static DEFINE_MUTEX(global_mutex);

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

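/*
 * Submission protocol, as implemented below: clear the channel status,
 * write the descriptor's DMA address to the channel CMDPTR register,
 * then increment the channel semaphore. The DCP fetches and executes
 * the descriptor and raises the channel IRQ, which completes the
 * per-channel completion (see mxs_dcp_irq()).
 */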
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	int ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));

	/* Unmap on all paths, so the timeout/error returns below do not
	 * leak the mapping. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (actx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (actx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

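/*
 * mxs_dcp_aes_block_crypt() pumps the request's scatterlists through
 * the coherent bounce buffers one DCP_BUF_SZ chunk at a time; the DCP
 * DMA needs physically contiguous, coherent memory, which the caller's
 * scatterlist does not guarantee.
 */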
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0;
	int init = 0;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!actx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src)) {
				ret = mxs_dcp_run_aes(actx, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int ret;

	ablkcipher_request_set_tfm(req, ctx->fallback);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

	return ret;
}

static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	actx->enc = enc;
	actx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return -EINPROGRESS;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	int ret;

	/*
	 * AES 128 is supported by the hardware: store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/* Check if the key size is supported by the kernel at all. */
	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	actx->fallback->base.crt_flags |=
		tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;

	ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |=
		actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *blk;

	blk = crypto_alloc_ablkcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(actx->fallback);
	actx->fallback = NULL;
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);

	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t digest_phys = dma_map_single(sdcp->dev,
						sdcp->coh->sha_digest,
						SHA256_DIGEST_SIZE,
						DMA_FROM_DEVICE);

	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, digest_phys, SHA256_DIGEST_SIZE,
			 DMA_FROM_DEVICE);
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *digest = sdcp->coh->sha_digest;
	uint8_t *in_buf = sdcp->coh->sha_in_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		ret = mxs_dcp_run_sha(req);
		if (ret || !req->result)
			return ret;
		actx->fill = 0;

		/* For some reason, the result is flipped. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = digest[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
			if (!fini)
				continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start a new hashing session. The code below only initializes
	 * the hashing session context; nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return -EINPROGRESS;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

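/*
 * Usage sketch (illustrative, not part of this patch): a kernel caller
 * reaches the hash transforms below through the generic ahash API, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * Since these algs are CRYPTO_ALG_ASYNC, err is -EINPROGRESS here and
 * the final status arrives through the request's completion callback.
 */
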
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	mutex_lock(&global_mutex);
	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		ret = -ENODEV;
		goto err_mutex;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_vmi_irq < 0 || dcp_irq < 0) {
		ret = -EINVAL;
		goto err_mutex;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp) {
		ret = -ENOMEM;
		goto err_mutex;
	}

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base)) {
		ret = PTR_ERR(sdcp->base);
		goto err_mutex;
	}

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		goto err_mutex;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		goto err_mutex;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = kzalloc(sizeof(struct dcp_coherent_block), GFP_KERNEL);
	if (!sdcp->coh) {
		dev_err(dev, "Error allocating coherent block\n");
		ret = -ENOMEM;
		goto err_mutex;
	}

	/* Restart the DCP block. */
	stmp_reset_block(sdcp->base);

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		mutex_init(&sdcp->mutex[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_free_coherent;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting crypto thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	/* Release the global lock taken at probe entry; keeping it held
	 * would deadlock any later probe or remove. */
	mutex_unlock(&global_mutex);

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_free_coherent:
	kfree(sdcp->coh);
err_mutex:
	mutex_unlock(&global_mutex);
	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	/* Free the coherent block only after the worker threads are gone
	 * and no algorithm can submit new work. */
	kfree(sdcp->coh);

	platform_set_drvdata(pdev, NULL);

	mutex_lock(&global_mutex);
	global_sdcp = NULL;
	mutex_unlock(&global_mutex);

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

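/*
 * Illustrative device tree node (register address, size and interrupt
 * numbers are board-specific examples, not normative):
 *
 *	dcp: dcp@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53 54>;
 *	};
 */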
static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.owner		= THIS_MODULE,
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");