Diffstat (limited to 'drivers/crypto')
33 files changed, 8426 insertions, 676 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 1092a770482e..7d74d092aa8f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -298,7 +298,7 @@ config CRYPTO_DEV_TEGRA_AES | |||
298 | will be called tegra-aes. | 298 | will be called tegra-aes. |
299 | 299 | ||
300 | config CRYPTO_DEV_NX | 300 | config CRYPTO_DEV_NX |
301 | tristate "Support for Power7+ in-Nest cryptographic accleration" | 301 | tristate "Support for Power7+ in-Nest cryptographic acceleration" |
302 | depends on PPC64 && IBMVIO | 302 | depends on PPC64 && IBMVIO |
303 | select CRYPTO_AES | 303 | select CRYPTO_AES |
304 | select CRYPTO_CBC | 304 | select CRYPTO_CBC |
@@ -325,4 +325,58 @@ if CRYPTO_DEV_UX500 | |||
325 | source "drivers/crypto/ux500/Kconfig" | 325 | source "drivers/crypto/ux500/Kconfig" |
326 | endif # if CRYPTO_DEV_UX500 | 326 | endif # if CRYPTO_DEV_UX500 |
327 | 327 | ||
328 | config CRYPTO_DEV_BFIN_CRC | ||
329 | tristate "Support for Blackfin CRC hardware" | ||
330 | depends on BF60x | ||
331 | help | ||
332 | Newer Blackfin processors have CRC hardware. Select this if you | ||
333 | want to use the Blackfin CRC module. | ||
334 | |||
335 | config CRYPTO_DEV_ATMEL_AES | ||
336 | tristate "Support for Atmel AES hw accelerator" | ||
337 | depends on ARCH_AT91 | ||
338 | select CRYPTO_CBC | ||
339 | select CRYPTO_ECB | ||
340 | select CRYPTO_AES | ||
341 | select CRYPTO_ALGAPI | ||
342 | select CRYPTO_BLKCIPHER | ||
343 | select AT_HDMAC | ||
344 | help | ||
345 | Some Atmel processors have an AES hw accelerator. | ||
346 | Select this if you want to use the Atmel module for | ||
347 | AES algorithms. | ||
348 | |||
349 | To compile this driver as a module, choose M here: the module | ||
350 | will be called atmel-aes. | ||
351 | |||
352 | config CRYPTO_DEV_ATMEL_TDES | ||
353 | tristate "Support for Atmel DES/TDES hw accelerator" | ||
354 | depends on ARCH_AT91 | ||
355 | select CRYPTO_DES | ||
356 | select CRYPTO_CBC | ||
357 | select CRYPTO_ECB | ||
358 | select CRYPTO_ALGAPI | ||
359 | select CRYPTO_BLKCIPHER | ||
360 | help | ||
361 | Some Atmel processors have a DES/TDES hw accelerator. | ||
362 | Select this if you want to use the Atmel module for | ||
363 | DES/TDES algorithms. | ||
364 | |||
365 | To compile this driver as a module, choose M here: the module | ||
366 | will be called atmel-tdes. | ||
367 | |||
368 | config CRYPTO_DEV_ATMEL_SHA | ||
369 | tristate "Support for Atmel SHA1/SHA256 hw accelerator" | ||
370 | depends on ARCH_AT91 | ||
371 | select CRYPTO_SHA1 | ||
372 | select CRYPTO_SHA256 | ||
373 | select CRYPTO_ALGAPI | ||
374 | help | ||
375 | Some Atmel processors have a SHA1/SHA256 hw accelerator. | ||
376 | Select this if you want to use the Atmel module for | ||
377 | SHA1/SHA256 algorithms. | ||
378 | |||
379 | To compile this driver as a module, choose M here: the module | ||
380 | will be called atmel-sha. | ||
381 | |||
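For reference, a sketch of a config fragment enabling the three new Atmel entries as modules (assuming an AT91 platform; the symbol names are the ones defined above, and the AES entry additionally pulls in the AT_HDMAC DMA controller via select):

    CONFIG_CRYPTO_HW=y
    CONFIG_CRYPTO_DEV_ATMEL_AES=m
    CONFIG_CRYPTO_DEV_ATMEL_TDES=m
    CONFIG_CRYPTO_DEV_ATMEL_SHA=m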
328 | endif # CRYPTO_HW | 382 | endif # CRYPTO_HW |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 01390325d72d..880a47b0b023 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -14,4 +14,9 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o | |||
14 | obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o | 14 | obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o |
15 | obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o | 15 | obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o |
16 | obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o | 16 | obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o |
17 | obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ \ No newline at end of file | 17 | obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ |
18 | obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o | ||
19 | obj-$(CONFIG_CRYPTO_DEV_NX) += nx/ | ||
20 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o | ||
21 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o | ||
22 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o | ||
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h new file mode 100644 index 000000000000..2786bb1a5aa0 --- /dev/null +++ b/drivers/crypto/atmel-aes-regs.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef __ATMEL_AES_REGS_H__ | ||
2 | #define __ATMEL_AES_REGS_H__ | ||
3 | |||
4 | #define AES_CR 0x00 | ||
5 | #define AES_CR_START (1 << 0) | ||
6 | #define AES_CR_SWRST (1 << 8) | ||
7 | #define AES_CR_LOADSEED (1 << 16) | ||
8 | |||
9 | #define AES_MR 0x04 | ||
10 | #define AES_MR_CYPHER_DEC (0 << 0) | ||
11 | #define AES_MR_CYPHER_ENC (1 << 0) | ||
12 | #define AES_MR_DUALBUFF (1 << 3) | ||
13 | #define AES_MR_PROCDLY_MASK (0xF << 4) | ||
14 | #define AES_MR_PROCDLY_OFFSET 4 | ||
15 | #define AES_MR_SMOD_MASK (0x3 << 8) | ||
16 | #define AES_MR_SMOD_MANUAL (0x0 << 8) | ||
17 | #define AES_MR_SMOD_AUTO (0x1 << 8) | ||
18 | #define AES_MR_SMOD_IDATAR0 (0x2 << 8) | ||
19 | #define AES_MR_KEYSIZE_MASK (0x3 << 10) | ||
20 | #define AES_MR_KEYSIZE_128 (0x0 << 10) | ||
21 | #define AES_MR_KEYSIZE_192 (0x1 << 10) | ||
22 | #define AES_MR_KEYSIZE_256 (0x2 << 10) | ||
23 | #define AES_MR_OPMOD_MASK (0x7 << 12) | ||
24 | #define AES_MR_OPMOD_ECB (0x0 << 12) | ||
25 | #define AES_MR_OPMOD_CBC (0x1 << 12) | ||
26 | #define AES_MR_OPMOD_OFB (0x2 << 12) | ||
27 | #define AES_MR_OPMOD_CFB (0x3 << 12) | ||
28 | #define AES_MR_OPMOD_CTR (0x4 << 12) | ||
29 | #define AES_MR_LOD (0x1 << 15) | ||
30 | #define AES_MR_CFBS_MASK (0x7 << 16) | ||
31 | #define AES_MR_CFBS_128b (0x0 << 16) | ||
32 | #define AES_MR_CFBS_64b (0x1 << 16) | ||
33 | #define AES_MR_CFBS_32b (0x2 << 16) | ||
34 | #define AES_MR_CFBS_16b (0x3 << 16) | ||
35 | #define AES_MR_CFBS_8b (0x4 << 16) | ||
36 | #define AES_MR_CKEY_MASK (0xF << 20) | ||
37 | #define AES_MR_CKEY_OFFSET 20 | ||
38 | #define AES_MR_CMTYP_MASK (0x1F << 24) | ||
39 | #define AES_MR_CMTYP_OFFSET 24 | ||
40 | |||
41 | #define AES_IER 0x10 | ||
42 | #define AES_IDR 0x14 | ||
43 | #define AES_IMR 0x18 | ||
44 | #define AES_ISR 0x1C | ||
45 | #define AES_INT_DATARDY (1 << 0) | ||
46 | #define AES_INT_URAD (1 << 8) | ||
47 | #define AES_ISR_URAT_MASK (0xF << 12) | ||
48 | #define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12) | ||
49 | #define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12) | ||
50 | #define AES_ISR_URAT_MR_WR_PROC (0x2 << 12) | ||
51 | #define AES_ISR_URAT_ODR_RD_SUBK (0x3 << 12) | ||
52 | #define AES_ISR_URAT_MR_WR_SUBK (0x4 << 12) | ||
53 | #define AES_ISR_URAT_WOR_RD (0x5 << 12) | ||
54 | |||
55 | #define AES_KEYWR(x) (0x20 + ((x) * 0x04)) | ||
56 | #define AES_IDATAR(x) (0x40 + ((x) * 0x04)) | ||
57 | #define AES_ODATAR(x) (0x50 + ((x) * 0x04)) | ||
58 | #define AES_IVR(x) (0x60 + ((x) * 0x04)) | ||
59 | |||
60 | #define AES_HW_VERSION 0xFC | ||
61 | |||
62 | #endif /* __ATMEL_AES_REGS_H__ */ | ||
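As a worked example of the indexed register macros above: keys are written one 32-bit word at a time, so a 256-bit key occupies AES_KEYWR(0)..AES_KEYWR(7) at offsets 0x20..0x3C, and the four IV words go to AES_IVR(0)..AES_IVR(3) at offsets 0x60..0x6C.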
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c new file mode 100644 index 000000000000..6bb20fffbf49 --- /dev/null +++ b/drivers/crypto/atmel-aes.c | |||
@@ -0,0 +1,1206 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for ATMEL AES HW acceleration. | ||
5 | * | ||
6 | * Copyright (c) 2012 Eukréa Electromatique - ATMEL | ||
7 | * Author: Nicolas Royer <nicolas@eukrea.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * Some ideas are from the omap-aes.c driver. | ||
14 | */ | ||
15 | |||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/hw_random.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | |||
26 | #include <linux/device.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/clk.h> | ||
33 | #include <linux/irq.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <linux/scatterlist.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/crypto.h> | ||
40 | #include <linux/cryptohash.h> | ||
41 | #include <crypto/scatterwalk.h> | ||
42 | #include <crypto/algapi.h> | ||
43 | #include <crypto/aes.h> | ||
44 | #include <crypto/hash.h> | ||
45 | #include <crypto/internal/hash.h> | ||
46 | #include <linux/platform_data/atmel-aes.h> | ||
47 | #include "atmel-aes-regs.h" | ||
48 | |||
49 | #define CFB8_BLOCK_SIZE 1 | ||
50 | #define CFB16_BLOCK_SIZE 2 | ||
51 | #define CFB32_BLOCK_SIZE 4 | ||
52 | #define CFB64_BLOCK_SIZE 8 | ||
53 | |||
54 | /* AES flags */ | ||
55 | #define AES_FLAGS_MODE_MASK 0x01ff | ||
56 | #define AES_FLAGS_ENCRYPT BIT(0) | ||
57 | #define AES_FLAGS_CBC BIT(1) | ||
58 | #define AES_FLAGS_CFB BIT(2) | ||
59 | #define AES_FLAGS_CFB8 BIT(3) | ||
60 | #define AES_FLAGS_CFB16 BIT(4) | ||
61 | #define AES_FLAGS_CFB32 BIT(5) | ||
62 | #define AES_FLAGS_CFB64 BIT(6) | ||
63 | #define AES_FLAGS_OFB BIT(7) | ||
64 | #define AES_FLAGS_CTR BIT(8) | ||
65 | |||
66 | #define AES_FLAGS_INIT BIT(16) | ||
67 | #define AES_FLAGS_DMA BIT(17) | ||
68 | #define AES_FLAGS_BUSY BIT(18) | ||
69 | |||
70 | #define AES_FLAGS_DUALBUFF BIT(24) | ||
71 | |||
72 | #define ATMEL_AES_QUEUE_LENGTH 1 | ||
73 | #define ATMEL_AES_CACHE_SIZE 0 | ||
74 | |||
75 | #define ATMEL_AES_DMA_THRESHOLD 16 | ||
76 | |||
77 | |||
78 | struct atmel_aes_dev; | ||
79 | |||
80 | struct atmel_aes_ctx { | ||
81 | struct atmel_aes_dev *dd; | ||
82 | |||
83 | int keylen; | ||
84 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | ||
85 | }; | ||
86 | |||
87 | struct atmel_aes_reqctx { | ||
88 | unsigned long mode; | ||
89 | }; | ||
90 | |||
91 | struct atmel_aes_dma { | ||
92 | struct dma_chan *chan; | ||
93 | struct dma_slave_config dma_conf; | ||
94 | }; | ||
95 | |||
96 | struct atmel_aes_dev { | ||
97 | struct list_head list; | ||
98 | unsigned long phys_base; | ||
99 | void __iomem *io_base; | ||
100 | |||
101 | struct atmel_aes_ctx *ctx; | ||
102 | struct device *dev; | ||
103 | struct clk *iclk; | ||
104 | int irq; | ||
105 | |||
106 | unsigned long flags; | ||
107 | int err; | ||
108 | |||
109 | spinlock_t lock; | ||
110 | struct crypto_queue queue; | ||
111 | |||
112 | struct tasklet_struct done_task; | ||
113 | struct tasklet_struct queue_task; | ||
114 | |||
115 | struct ablkcipher_request *req; | ||
116 | size_t total; | ||
117 | |||
118 | struct scatterlist *in_sg; | ||
119 | unsigned int nb_in_sg; | ||
120 | |||
121 | struct scatterlist *out_sg; | ||
122 | unsigned int nb_out_sg; | ||
123 | |||
124 | size_t bufcnt; | ||
125 | |||
126 | u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32)); | ||
127 | int dma_in; | ||
128 | struct atmel_aes_dma dma_lch_in; | ||
129 | |||
130 | u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32)); | ||
131 | int dma_out; | ||
132 | struct atmel_aes_dma dma_lch_out; | ||
133 | |||
134 | u32 hw_version; | ||
135 | }; | ||
136 | |||
137 | struct atmel_aes_drv { | ||
138 | struct list_head dev_list; | ||
139 | spinlock_t lock; | ||
140 | }; | ||
141 | |||
142 | static struct atmel_aes_drv atmel_aes = { | ||
143 | .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list), | ||
144 | .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), | ||
145 | }; | ||
146 | |||
147 | static int atmel_aes_sg_length(struct ablkcipher_request *req, | ||
148 | struct scatterlist *sg) | ||
149 | { | ||
150 | unsigned int total = req->nbytes; | ||
151 | int sg_nb; | ||
152 | unsigned int len; | ||
153 | struct scatterlist *sg_list; | ||
154 | |||
155 | sg_nb = 0; | ||
156 | sg_list = sg; | ||
157 | total = req->nbytes; | ||
158 | |||
159 | while (total) { | ||
160 | len = min(sg_list->length, total); | ||
161 | |||
162 | sg_nb++; | ||
163 | total -= len; | ||
164 | |||
165 | sg_list = sg_next(sg_list); | ||
166 | if (!sg_list) | ||
167 | total = 0; | ||
168 | } | ||
169 | |||
170 | return sg_nb; | ||
171 | } | ||
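For example, a 64-byte request spread over scatterlist entries of 16, 32 and 64 bytes makes atmel_aes_sg_length() return 3: the first two entries are consumed in full, the third only partially, and the walk stops once the running total reaches req->nbytes.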
172 | |||
173 | static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) | ||
174 | { | ||
175 | return readl_relaxed(dd->io_base + offset); | ||
176 | } | ||
177 | |||
178 | static inline void atmel_aes_write(struct atmel_aes_dev *dd, | ||
179 | u32 offset, u32 value) | ||
180 | { | ||
181 | writel_relaxed(value, dd->io_base + offset); | ||
182 | } | ||
183 | |||
184 | static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset, | ||
185 | u32 *value, int count) | ||
186 | { | ||
187 | for (; count--; value++, offset += 4) | ||
188 | *value = atmel_aes_read(dd, offset); | ||
189 | } | ||
190 | |||
191 | static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, | ||
192 | u32 *value, int count) | ||
193 | { | ||
194 | for (; count--; value++, offset += 4) | ||
195 | atmel_aes_write(dd, offset, *value); | ||
196 | } | ||
197 | |||
198 | static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd) | ||
199 | { | ||
200 | atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF); | ||
201 | |||
202 | if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF) | ||
203 | dd->flags |= AES_FLAGS_DUALBUFF; | ||
204 | } | ||
205 | |||
206 | static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx) | ||
207 | { | ||
208 | struct atmel_aes_dev *aes_dd = NULL; | ||
209 | struct atmel_aes_dev *tmp; | ||
210 | |||
211 | spin_lock_bh(&atmel_aes.lock); | ||
212 | if (!ctx->dd) { | ||
213 | list_for_each_entry(tmp, &atmel_aes.dev_list, list) { | ||
214 | aes_dd = tmp; | ||
215 | break; | ||
216 | } | ||
217 | ctx->dd = aes_dd; | ||
218 | } else { | ||
219 | aes_dd = ctx->dd; | ||
220 | } | ||
221 | |||
222 | spin_unlock_bh(&atmel_aes.lock); | ||
223 | |||
224 | return aes_dd; | ||
225 | } | ||
226 | |||
227 | static int atmel_aes_hw_init(struct atmel_aes_dev *dd) | ||
228 | { | ||
229 | clk_prepare_enable(dd->iclk); | ||
230 | |||
231 | if (!(dd->flags & AES_FLAGS_INIT)) { | ||
232 | atmel_aes_write(dd, AES_CR, AES_CR_SWRST); | ||
233 | atmel_aes_dualbuff_test(dd); | ||
234 | dd->flags |= AES_FLAGS_INIT; | ||
235 | dd->err = 0; | ||
236 | } | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd) | ||
242 | { | ||
243 | atmel_aes_hw_init(dd); | ||
244 | |||
245 | dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION); | ||
246 | |||
247 | clk_disable_unprepare(dd->iclk); | ||
248 | } | ||
249 | |||
250 | static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) | ||
251 | { | ||
252 | struct ablkcipher_request *req = dd->req; | ||
253 | |||
254 | clk_disable_unprepare(dd->iclk); | ||
255 | dd->flags &= ~AES_FLAGS_BUSY; | ||
256 | |||
257 | req->base.complete(&req->base, err); | ||
258 | } | ||
259 | |||
260 | static void atmel_aes_dma_callback(void *data) | ||
261 | { | ||
262 | struct atmel_aes_dev *dd = data; | ||
263 | |||
264 | /* dma_lch_out - completed */ | ||
265 | tasklet_schedule(&dd->done_task); | ||
266 | } | ||
267 | |||
268 | static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd) | ||
269 | { | ||
270 | struct dma_async_tx_descriptor *in_desc, *out_desc; | ||
271 | int nb_dma_sg_in, nb_dma_sg_out; | ||
272 | |||
273 | dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg); | ||
274 | if (!dd->nb_in_sg) | ||
275 | goto exit_err; | ||
276 | |||
277 | nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg, | ||
278 | DMA_TO_DEVICE); | ||
279 | if (!nb_dma_sg_in) | ||
280 | goto exit_err; | ||
281 | |||
282 | in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg, | ||
283 | nb_dma_sg_in, DMA_MEM_TO_DEV, | ||
284 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
285 | |||
286 | if (!in_desc) | ||
287 | goto unmap_in; | ||
288 | |||
289 | /* callback not needed */ | ||
290 | |||
291 | dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); | ||
292 | if (!dd->nb_out_sg) | ||
293 | goto unmap_in; | ||
294 | |||
295 | nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg, | ||
296 | DMA_FROM_DEVICE); | ||
297 | if (!nb_dma_sg_out) | ||
298 | goto unmap_in; | ||
299 | |||
300 | out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg, | ||
301 | nb_dma_sg_out, DMA_DEV_TO_MEM, | ||
302 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
303 | |||
304 | if (!out_desc) | ||
305 | goto unmap_out; | ||
306 | |||
307 | out_desc->callback = atmel_aes_dma_callback; | ||
308 | out_desc->callback_param = dd; | ||
309 | |||
310 | dd->total -= dd->req->nbytes; | ||
311 | |||
312 | dmaengine_submit(out_desc); | ||
313 | dma_async_issue_pending(dd->dma_lch_out.chan); | ||
314 | |||
315 | dmaengine_submit(in_desc); | ||
316 | dma_async_issue_pending(dd->dma_lch_in.chan); | ||
317 | |||
318 | return 0; | ||
319 | |||
320 | unmap_out: | ||
321 | dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg, | ||
322 | DMA_FROM_DEVICE); | ||
323 | unmap_in: | ||
324 | dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg, | ||
325 | DMA_TO_DEVICE); | ||
326 | exit_err: | ||
327 | return -EINVAL; | ||
328 | } | ||
329 | |||
330 | static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) | ||
331 | { | ||
332 | dd->flags &= ~AES_FLAGS_DMA; | ||
333 | |||
334 | /* use cache buffers */ | ||
335 | dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg); | ||
336 | if (!dd->nb_in_sg) | ||
337 | return -EINVAL; | ||
338 | |||
339 | dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); | ||
340 | if (!dd->nb_out_sg) | ||
341 | return -EINVAL; | ||
342 | |||
343 | dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg, | ||
344 | dd->buf_in, dd->total); | ||
345 | |||
346 | if (!dd->bufcnt) | ||
347 | return -EINVAL; | ||
348 | |||
349 | dd->total -= dd->bufcnt; | ||
350 | |||
351 | atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); | ||
352 | atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in, | ||
353 | dd->bufcnt >> 2); | ||
354 | |||
355 | return 0; | ||
356 | } | ||
357 | |||
358 | static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) | ||
359 | { | ||
360 | int err; | ||
361 | |||
362 | if (dd->flags & AES_FLAGS_CFB8) { | ||
363 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
364 | DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
365 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
366 | DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
367 | } else if (dd->flags & AES_FLAGS_CFB16) { | ||
368 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
369 | DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
370 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
371 | DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
372 | } else { | ||
373 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
374 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
375 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
376 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
377 | } | ||
378 | |||
379 | dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); | ||
380 | dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); | ||
381 | |||
382 | dd->flags |= AES_FLAGS_DMA; | ||
383 | err = atmel_aes_crypt_dma(dd); | ||
384 | |||
385 | return err; | ||
386 | } | ||
387 | |||
388 | static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd) | ||
389 | { | ||
390 | int err; | ||
391 | u32 valcr = 0, valmr = 0; | ||
392 | |||
393 | err = atmel_aes_hw_init(dd); | ||
394 | |||
395 | if (err) | ||
396 | return err; | ||
397 | |||
398 | /* MR register must be set before IV registers */ | ||
399 | if (dd->ctx->keylen == AES_KEYSIZE_128) | ||
400 | valmr |= AES_MR_KEYSIZE_128; | ||
401 | else if (dd->ctx->keylen == AES_KEYSIZE_192) | ||
402 | valmr |= AES_MR_KEYSIZE_192; | ||
403 | else | ||
404 | valmr |= AES_MR_KEYSIZE_256; | ||
405 | |||
406 | if (dd->flags & AES_FLAGS_CBC) { | ||
407 | valmr |= AES_MR_OPMOD_CBC; | ||
408 | } else if (dd->flags & AES_FLAGS_CFB) { | ||
409 | valmr |= AES_MR_OPMOD_CFB; | ||
410 | if (dd->flags & AES_FLAGS_CFB8) | ||
411 | valmr |= AES_MR_CFBS_8b; | ||
412 | else if (dd->flags & AES_FLAGS_CFB16) | ||
413 | valmr |= AES_MR_CFBS_16b; | ||
414 | else if (dd->flags & AES_FLAGS_CFB32) | ||
415 | valmr |= AES_MR_CFBS_32b; | ||
416 | else if (dd->flags & AES_FLAGS_CFB64) | ||
417 | valmr |= AES_MR_CFBS_64b; | ||
418 | } else if (dd->flags & AES_FLAGS_OFB) { | ||
419 | valmr |= AES_MR_OPMOD_OFB; | ||
420 | } else if (dd->flags & AES_FLAGS_CTR) { | ||
421 | valmr |= AES_MR_OPMOD_CTR; | ||
422 | } else { | ||
423 | valmr |= AES_MR_OPMOD_ECB; | ||
424 | } | ||
425 | |||
426 | if (dd->flags & AES_FLAGS_ENCRYPT) | ||
427 | valmr |= AES_MR_CYPHER_ENC; | ||
428 | |||
429 | if (dd->total > ATMEL_AES_DMA_THRESHOLD) { | ||
430 | valmr |= AES_MR_SMOD_IDATAR0; | ||
431 | if (dd->flags & AES_FLAGS_DUALBUFF) | ||
432 | valmr |= AES_MR_DUALBUFF; | ||
433 | } else { | ||
434 | valmr |= AES_MR_SMOD_AUTO; | ||
435 | } | ||
436 | |||
437 | atmel_aes_write(dd, AES_CR, valcr); | ||
438 | atmel_aes_write(dd, AES_MR, valmr); | ||
439 | |||
440 | atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, | ||
441 | dd->ctx->keylen >> 2); | ||
442 | |||
443 | if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) || | ||
444 | (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) && | ||
445 | dd->req->info) { | ||
446 | atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4); | ||
447 | } | ||
448 | |||
449 | return 0; | ||
450 | } | ||
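As a worked example of the mode-register value computed above: a 128-bit CBC encryption of a request larger than ATMEL_AES_DMA_THRESHOLD, with dual-buffer mode unavailable, yields valmr = AES_MR_KEYSIZE_128 | AES_MR_OPMOD_CBC | AES_MR_CYPHER_ENC | AES_MR_SMOD_IDATAR0 = 0x0 | (0x1 << 12) | (1 << 0) | (0x2 << 8) = 0x1201.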
451 | |||
452 | static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, | ||
453 | struct ablkcipher_request *req) | ||
454 | { | ||
455 | struct crypto_async_request *async_req, *backlog; | ||
456 | struct atmel_aes_ctx *ctx; | ||
457 | struct atmel_aes_reqctx *rctx; | ||
458 | unsigned long flags; | ||
459 | int err, ret = 0; | ||
460 | |||
461 | spin_lock_irqsave(&dd->lock, flags); | ||
462 | if (req) | ||
463 | ret = ablkcipher_enqueue_request(&dd->queue, req); | ||
464 | if (dd->flags & AES_FLAGS_BUSY) { | ||
465 | spin_unlock_irqrestore(&dd->lock, flags); | ||
466 | return ret; | ||
467 | } | ||
468 | backlog = crypto_get_backlog(&dd->queue); | ||
469 | async_req = crypto_dequeue_request(&dd->queue); | ||
470 | if (async_req) | ||
471 | dd->flags |= AES_FLAGS_BUSY; | ||
472 | spin_unlock_irqrestore(&dd->lock, flags); | ||
473 | |||
474 | if (!async_req) | ||
475 | return ret; | ||
476 | |||
477 | if (backlog) | ||
478 | backlog->complete(backlog, -EINPROGRESS); | ||
479 | |||
480 | req = ablkcipher_request_cast(async_req); | ||
481 | |||
482 | /* assign new request to device */ | ||
483 | dd->req = req; | ||
484 | dd->total = req->nbytes; | ||
485 | dd->in_sg = req->src; | ||
486 | dd->out_sg = req->dst; | ||
487 | |||
488 | rctx = ablkcipher_request_ctx(req); | ||
489 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
490 | rctx->mode &= AES_FLAGS_MODE_MASK; | ||
491 | dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode; | ||
492 | dd->ctx = ctx; | ||
493 | ctx->dd = dd; | ||
494 | |||
495 | err = atmel_aes_write_ctrl(dd); | ||
496 | if (!err) { | ||
497 | if (dd->total > ATMEL_AES_DMA_THRESHOLD) | ||
498 | err = atmel_aes_crypt_dma_start(dd); | ||
499 | else | ||
500 | err = atmel_aes_crypt_cpu_start(dd); | ||
501 | } | ||
502 | if (err) { | ||
503 | /* the done_task will not finish it, so do it here */ | ||
504 | atmel_aes_finish_req(dd, err); | ||
505 | tasklet_schedule(&dd->queue_task); | ||
506 | } | ||
507 | |||
508 | return ret; | ||
509 | } | ||
510 | |||
511 | static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) | ||
512 | { | ||
513 | int err = -EINVAL; | ||
514 | |||
515 | if (dd->flags & AES_FLAGS_DMA) { | ||
516 | dma_unmap_sg(dd->dev, dd->out_sg, | ||
517 | dd->nb_out_sg, DMA_FROM_DEVICE); | ||
518 | dma_unmap_sg(dd->dev, dd->in_sg, | ||
519 | dd->nb_in_sg, DMA_TO_DEVICE); | ||
520 | err = 0; | ||
521 | } | ||
522 | |||
523 | return err; | ||
524 | } | ||
525 | |||
526 | static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
527 | { | ||
528 | struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx( | ||
529 | crypto_ablkcipher_reqtfm(req)); | ||
530 | struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
531 | struct atmel_aes_dev *dd; | ||
532 | |||
533 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { | ||
534 | pr_err("request size is not an exact number of AES blocks\n"); | ||
535 | return -EINVAL; | ||
536 | } | ||
537 | |||
538 | dd = atmel_aes_find_dev(ctx); | ||
539 | if (!dd) | ||
540 | return -ENODEV; | ||
541 | |||
542 | rctx->mode = mode; | ||
543 | |||
544 | return atmel_aes_handle_queue(dd, req); | ||
545 | } | ||
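For context — nothing in this patch, just an illustration — a kernel consumer reaches this driver through the asynchronous ablkcipher API rather than by calling it directly. A minimal sketch follows; the function name example_cbc_encrypt and the src/dst/key/iv/done parameters are illustrative placeholders, not part of the driver:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/crypto.h>
    #include <linux/scatterlist.h>
    #include <crypto/aes.h>

    /* Sketch: asynchronously CBC-encrypt nbytes (a multiple of
     * AES_BLOCK_SIZE, as enforced by atmel_aes_crypt() above) from src
     * to dst. With this driver loaded, "cbc(aes)" can resolve to the
     * "atmel-cbc-aes" implementation registered below. */
    static int example_cbc_encrypt(struct scatterlist *src,
    				   struct scatterlist *dst,
    				   unsigned int nbytes, const u8 *key,
    				   void *iv, crypto_completion_t done,
    				   void *done_ctx)
    {
    	struct crypto_ablkcipher *tfm;
    	struct ablkcipher_request *req;
    	int err;

    	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	err = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
    	if (err)
    		goto out;

    	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
    	if (!req) {
    		err = -ENOMEM;
    		goto out;
    	}

    	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
    					done, done_ctx);
    	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

    	/* Typically returns -EINPROGRESS; "done" fires on completion
    	 * and is then responsible for freeing req and tfm. */
    	return crypto_ablkcipher_encrypt(req);

    out:
    	crypto_free_ablkcipher(tfm);
    	return err;
    }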
546 | |||
547 | static bool atmel_aes_filter(struct dma_chan *chan, void *slave) | ||
548 | { | ||
549 | struct at_dma_slave *sl = slave; | ||
550 | |||
551 | if (sl && sl->dma_dev == chan->device->dev) { | ||
552 | chan->private = sl; | ||
553 | return true; | ||
554 | } else { | ||
555 | return false; | ||
556 | } | ||
557 | } | ||
558 | |||
559 | static int atmel_aes_dma_init(struct atmel_aes_dev *dd) | ||
560 | { | ||
561 | int err = -ENOMEM; | ||
562 | struct aes_platform_data *pdata; | ||
563 | dma_cap_mask_t mask_in, mask_out; | ||
564 | |||
565 | pdata = dd->dev->platform_data; | ||
566 | |||
567 | if (pdata && pdata->dma_slave->txdata.dma_dev && | ||
568 | pdata->dma_slave->rxdata.dma_dev) { | ||
569 | |||
570 | /* Try to grab 2 DMA channels */ | ||
571 | dma_cap_zero(mask_in); | ||
572 | dma_cap_set(DMA_SLAVE, mask_in); | ||
573 | |||
574 | dd->dma_lch_in.chan = dma_request_channel(mask_in, | ||
575 | atmel_aes_filter, &pdata->dma_slave->rxdata); | ||
576 | if (!dd->dma_lch_in.chan) | ||
577 | goto err_dma_in; | ||
578 | |||
579 | dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; | ||
580 | dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + | ||
581 | AES_IDATAR(0); | ||
582 | dd->dma_lch_in.dma_conf.src_maxburst = 1; | ||
583 | dd->dma_lch_in.dma_conf.dst_maxburst = 1; | ||
584 | dd->dma_lch_in.dma_conf.device_fc = false; | ||
585 | |||
586 | dma_cap_zero(mask_out); | ||
587 | dma_cap_set(DMA_SLAVE, mask_out); | ||
588 | dd->dma_lch_out.chan = dma_request_channel(mask_out, | ||
589 | atmel_aes_filter, &pdata->dma_slave->txdata); | ||
590 | if (!dd->dma_lch_out.chan) | ||
591 | goto err_dma_out; | ||
592 | |||
593 | dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; | ||
594 | dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + | ||
595 | AES_ODATAR(0); | ||
596 | dd->dma_lch_out.dma_conf.src_maxburst = 1; | ||
597 | dd->dma_lch_out.dma_conf.dst_maxburst = 1; | ||
598 | dd->dma_lch_out.dma_conf.device_fc = false; | ||
599 | |||
600 | return 0; | ||
601 | } else { | ||
602 | return -ENODEV; | ||
603 | } | ||
604 | |||
605 | err_dma_out: | ||
606 | dma_release_channel(dd->dma_lch_in.chan); | ||
607 | err_dma_in: | ||
608 | return err; | ||
609 | } | ||
610 | |||
611 | static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) | ||
612 | { | ||
613 | dma_release_channel(dd->dma_lch_in.chan); | ||
614 | dma_release_channel(dd->dma_lch_out.chan); | ||
615 | } | ||
616 | |||
617 | static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
618 | unsigned int keylen) | ||
619 | { | ||
620 | struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
621 | |||
622 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | ||
623 | keylen != AES_KEYSIZE_256) { | ||
624 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
625 | return -EINVAL; | ||
626 | } | ||
627 | |||
628 | memcpy(ctx->key, key, keylen); | ||
629 | ctx->keylen = keylen; | ||
630 | |||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
635 | { | ||
636 | return atmel_aes_crypt(req, | ||
637 | AES_FLAGS_ENCRYPT); | ||
638 | } | ||
639 | |||
640 | static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
641 | { | ||
642 | return atmel_aes_crypt(req, | ||
643 | 0); | ||
644 | } | ||
645 | |||
646 | static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
647 | { | ||
648 | return atmel_aes_crypt(req, | ||
649 | AES_FLAGS_ENCRYPT | AES_FLAGS_CBC); | ||
650 | } | ||
651 | |||
652 | static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
653 | { | ||
654 | return atmel_aes_crypt(req, | ||
655 | AES_FLAGS_CBC); | ||
656 | } | ||
657 | |||
658 | static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req) | ||
659 | { | ||
660 | return atmel_aes_crypt(req, | ||
661 | AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); | ||
662 | } | ||
663 | |||
664 | static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req) | ||
665 | { | ||
666 | return atmel_aes_crypt(req, | ||
667 | AES_FLAGS_OFB); | ||
668 | } | ||
669 | |||
670 | static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req) | ||
671 | { | ||
672 | return atmel_aes_crypt(req, | ||
673 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB); | ||
674 | } | ||
675 | |||
676 | static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req) | ||
677 | { | ||
678 | return atmel_aes_crypt(req, | ||
679 | AES_FLAGS_CFB); | ||
680 | } | ||
681 | |||
682 | static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req) | ||
683 | { | ||
684 | return atmel_aes_crypt(req, | ||
685 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64); | ||
686 | } | ||
687 | |||
688 | static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req) | ||
689 | { | ||
690 | return atmel_aes_crypt(req, | ||
691 | AES_FLAGS_CFB | AES_FLAGS_CFB64); | ||
692 | } | ||
693 | |||
694 | static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req) | ||
695 | { | ||
696 | return atmel_aes_crypt(req, | ||
697 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32); | ||
698 | } | ||
699 | |||
700 | static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req) | ||
701 | { | ||
702 | return atmel_aes_crypt(req, | ||
703 | AES_FLAGS_CFB | AES_FLAGS_CFB32); | ||
704 | } | ||
705 | |||
706 | static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req) | ||
707 | { | ||
708 | return atmel_aes_crypt(req, | ||
709 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16); | ||
710 | } | ||
711 | |||
712 | static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req) | ||
713 | { | ||
714 | return atmel_aes_crypt(req, | ||
715 | AES_FLAGS_CFB | AES_FLAGS_CFB16); | ||
716 | } | ||
717 | |||
718 | static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req) | ||
719 | { | ||
720 | return atmel_aes_crypt(req, | ||
721 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8); | ||
722 | } | ||
723 | |||
724 | static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req) | ||
725 | { | ||
726 | return atmel_aes_crypt(req, | ||
727 | AES_FLAGS_CFB | AES_FLAGS_CFB8); | ||
728 | } | ||
729 | |||
730 | static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req) | ||
731 | { | ||
732 | return atmel_aes_crypt(req, | ||
733 | AES_FLAGS_ENCRYPT | AES_FLAGS_CTR); | ||
734 | } | ||
735 | |||
736 | static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req) | ||
737 | { | ||
738 | return atmel_aes_crypt(req, | ||
739 | AES_FLAGS_CTR); | ||
740 | } | ||
741 | |||
742 | static int atmel_aes_cra_init(struct crypto_tfm *tfm) | ||
743 | { | ||
744 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); | ||
745 | |||
746 | return 0; | ||
747 | } | ||
748 | |||
749 | static void atmel_aes_cra_exit(struct crypto_tfm *tfm) | ||
750 | { | ||
751 | } | ||
752 | |||
753 | static struct crypto_alg aes_algs[] = { | ||
754 | { | ||
755 | .cra_name = "ecb(aes)", | ||
756 | .cra_driver_name = "atmel-ecb-aes", | ||
757 | .cra_priority = 100, | ||
758 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
759 | .cra_blocksize = AES_BLOCK_SIZE, | ||
760 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
761 | .cra_alignmask = 0x0, | ||
762 | .cra_type = &crypto_ablkcipher_type, | ||
763 | .cra_module = THIS_MODULE, | ||
764 | .cra_init = atmel_aes_cra_init, | ||
765 | .cra_exit = atmel_aes_cra_exit, | ||
766 | .cra_u.ablkcipher = { | ||
767 | .min_keysize = AES_MIN_KEY_SIZE, | ||
768 | .max_keysize = AES_MAX_KEY_SIZE, | ||
769 | .setkey = atmel_aes_setkey, | ||
770 | .encrypt = atmel_aes_ecb_encrypt, | ||
771 | .decrypt = atmel_aes_ecb_decrypt, | ||
772 | } | ||
773 | }, | ||
774 | { | ||
775 | .cra_name = "cbc(aes)", | ||
776 | .cra_driver_name = "atmel-cbc-aes", | ||
777 | .cra_priority = 100, | ||
778 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
779 | .cra_blocksize = AES_BLOCK_SIZE, | ||
780 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
781 | .cra_alignmask = 0x0, | ||
782 | .cra_type = &crypto_ablkcipher_type, | ||
783 | .cra_module = THIS_MODULE, | ||
784 | .cra_init = atmel_aes_cra_init, | ||
785 | .cra_exit = atmel_aes_cra_exit, | ||
786 | .cra_u.ablkcipher = { | ||
787 | .min_keysize = AES_MIN_KEY_SIZE, | ||
788 | .max_keysize = AES_MAX_KEY_SIZE, | ||
789 | .ivsize = AES_BLOCK_SIZE, | ||
790 | .setkey = atmel_aes_setkey, | ||
791 | .encrypt = atmel_aes_cbc_encrypt, | ||
792 | .decrypt = atmel_aes_cbc_decrypt, | ||
793 | } | ||
794 | }, | ||
795 | { | ||
796 | .cra_name = "ofb(aes)", | ||
797 | .cra_driver_name = "atmel-ofb-aes", | ||
798 | .cra_priority = 100, | ||
799 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
800 | .cra_blocksize = AES_BLOCK_SIZE, | ||
801 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
802 | .cra_alignmask = 0x0, | ||
803 | .cra_type = &crypto_ablkcipher_type, | ||
804 | .cra_module = THIS_MODULE, | ||
805 | .cra_init = atmel_aes_cra_init, | ||
806 | .cra_exit = atmel_aes_cra_exit, | ||
807 | .cra_u.ablkcipher = { | ||
808 | .min_keysize = AES_MIN_KEY_SIZE, | ||
809 | .max_keysize = AES_MAX_KEY_SIZE, | ||
810 | .ivsize = AES_BLOCK_SIZE, | ||
811 | .setkey = atmel_aes_setkey, | ||
812 | .encrypt = atmel_aes_ofb_encrypt, | ||
813 | .decrypt = atmel_aes_ofb_decrypt, | ||
814 | } | ||
815 | }, | ||
816 | { | ||
817 | .cra_name = "cfb(aes)", | ||
818 | .cra_driver_name = "atmel-cfb-aes", | ||
819 | .cra_priority = 100, | ||
820 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
821 | .cra_blocksize = AES_BLOCK_SIZE, | ||
822 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
823 | .cra_alignmask = 0x0, | ||
824 | .cra_type = &crypto_ablkcipher_type, | ||
825 | .cra_module = THIS_MODULE, | ||
826 | .cra_init = atmel_aes_cra_init, | ||
827 | .cra_exit = atmel_aes_cra_exit, | ||
828 | .cra_u.ablkcipher = { | ||
829 | .min_keysize = AES_MIN_KEY_SIZE, | ||
830 | .max_keysize = AES_MAX_KEY_SIZE, | ||
831 | .ivsize = AES_BLOCK_SIZE, | ||
832 | .setkey = atmel_aes_setkey, | ||
833 | .encrypt = atmel_aes_cfb_encrypt, | ||
834 | .decrypt = atmel_aes_cfb_decrypt, | ||
835 | } | ||
836 | }, | ||
837 | { | ||
838 | .cra_name = "cfb32(aes)", | ||
839 | .cra_driver_name = "atmel-cfb32-aes", | ||
840 | .cra_priority = 100, | ||
841 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
842 | .cra_blocksize = CFB32_BLOCK_SIZE, | ||
843 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
844 | .cra_alignmask = 0x0, | ||
845 | .cra_type = &crypto_ablkcipher_type, | ||
846 | .cra_module = THIS_MODULE, | ||
847 | .cra_init = atmel_aes_cra_init, | ||
848 | .cra_exit = atmel_aes_cra_exit, | ||
849 | .cra_u.ablkcipher = { | ||
850 | .min_keysize = AES_MIN_KEY_SIZE, | ||
851 | .max_keysize = AES_MAX_KEY_SIZE, | ||
852 | .ivsize = AES_BLOCK_SIZE, | ||
853 | .setkey = atmel_aes_setkey, | ||
854 | .encrypt = atmel_aes_cfb32_encrypt, | ||
855 | .decrypt = atmel_aes_cfb32_decrypt, | ||
856 | } | ||
857 | }, | ||
858 | { | ||
859 | .cra_name = "cfb16(aes)", | ||
860 | .cra_driver_name = "atmel-cfb16-aes", | ||
861 | .cra_priority = 100, | ||
862 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
863 | .cra_blocksize = CFB16_BLOCK_SIZE, | ||
864 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
865 | .cra_alignmask = 0x0, | ||
866 | .cra_type = &crypto_ablkcipher_type, | ||
867 | .cra_module = THIS_MODULE, | ||
868 | .cra_init = atmel_aes_cra_init, | ||
869 | .cra_exit = atmel_aes_cra_exit, | ||
870 | .cra_u.ablkcipher = { | ||
871 | .min_keysize = AES_MIN_KEY_SIZE, | ||
872 | .max_keysize = AES_MAX_KEY_SIZE, | ||
873 | .ivsize = AES_BLOCK_SIZE, | ||
874 | .setkey = atmel_aes_setkey, | ||
875 | .encrypt = atmel_aes_cfb16_encrypt, | ||
876 | .decrypt = atmel_aes_cfb16_decrypt, | ||
877 | } | ||
878 | }, | ||
879 | { | ||
880 | .cra_name = "cfb8(aes)", | ||
881 | .cra_driver_name = "atmel-cfb8-aes", | ||
882 | .cra_priority = 100, | ||
883 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
884 | .cra_blocksize = CFB8_BLOCK_SIZE, | ||
885 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
886 | .cra_alignmask = 0x0, | ||
887 | .cra_type = &crypto_ablkcipher_type, | ||
888 | .cra_module = THIS_MODULE, | ||
889 | .cra_init = atmel_aes_cra_init, | ||
890 | .cra_exit = atmel_aes_cra_exit, | ||
891 | .cra_u.ablkcipher = { | ||
892 | .min_keysize = AES_MIN_KEY_SIZE, | ||
893 | .max_keysize = AES_MAX_KEY_SIZE, | ||
894 | .ivsize = AES_BLOCK_SIZE, | ||
895 | .setkey = atmel_aes_setkey, | ||
896 | .encrypt = atmel_aes_cfb8_encrypt, | ||
897 | .decrypt = atmel_aes_cfb8_decrypt, | ||
898 | } | ||
899 | }, | ||
900 | { | ||
901 | .cra_name = "ctr(aes)", | ||
902 | .cra_driver_name = "atmel-ctr-aes", | ||
903 | .cra_priority = 100, | ||
904 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
905 | .cra_blocksize = AES_BLOCK_SIZE, | ||
906 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
907 | .cra_alignmask = 0x0, | ||
908 | .cra_type = &crypto_ablkcipher_type, | ||
909 | .cra_module = THIS_MODULE, | ||
910 | .cra_init = atmel_aes_cra_init, | ||
911 | .cra_exit = atmel_aes_cra_exit, | ||
912 | .cra_u.ablkcipher = { | ||
913 | .min_keysize = AES_MIN_KEY_SIZE, | ||
914 | .max_keysize = AES_MAX_KEY_SIZE, | ||
915 | .ivsize = AES_BLOCK_SIZE, | ||
916 | .setkey = atmel_aes_setkey, | ||
917 | .encrypt = atmel_aes_ctr_encrypt, | ||
918 | .decrypt = atmel_aes_ctr_decrypt, | ||
919 | } | ||
920 | }, | ||
921 | }; | ||
922 | |||
923 | static struct crypto_alg aes_cfb64_alg[] = { | ||
924 | { | ||
925 | .cra_name = "cfb64(aes)", | ||
926 | .cra_driver_name = "atmel-cfb64-aes", | ||
927 | .cra_priority = 100, | ||
928 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
929 | .cra_blocksize = CFB64_BLOCK_SIZE, | ||
930 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
931 | .cra_alignmask = 0x0, | ||
932 | .cra_type = &crypto_ablkcipher_type, | ||
933 | .cra_module = THIS_MODULE, | ||
934 | .cra_init = atmel_aes_cra_init, | ||
935 | .cra_exit = atmel_aes_cra_exit, | ||
936 | .cra_u.ablkcipher = { | ||
937 | .min_keysize = AES_MIN_KEY_SIZE, | ||
938 | .max_keysize = AES_MAX_KEY_SIZE, | ||
939 | .ivsize = AES_BLOCK_SIZE, | ||
940 | .setkey = atmel_aes_setkey, | ||
941 | .encrypt = atmel_aes_cfb64_encrypt, | ||
942 | .decrypt = atmel_aes_cfb64_decrypt, | ||
943 | } | ||
944 | }, | ||
945 | }; | ||
946 | |||
947 | static void atmel_aes_queue_task(unsigned long data) | ||
948 | { | ||
949 | struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; | ||
950 | |||
951 | atmel_aes_handle_queue(dd, NULL); | ||
952 | } | ||
953 | |||
954 | static void atmel_aes_done_task(unsigned long data) | ||
955 | { | ||
956 | struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data; | ||
957 | int err; | ||
958 | |||
959 | if (!(dd->flags & AES_FLAGS_DMA)) { | ||
960 | atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out, | ||
961 | dd->bufcnt >> 2); | ||
962 | |||
963 | if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg, | ||
964 | dd->buf_out, dd->bufcnt)) | ||
965 | err = 0; | ||
966 | else | ||
967 | err = -EINVAL; | ||
968 | |||
969 | goto cpu_end; | ||
970 | } | ||
971 | |||
972 | err = atmel_aes_crypt_dma_stop(dd); | ||
973 | |||
974 | err = dd->err ? : err; | ||
975 | |||
976 | if (dd->total && !err) { | ||
977 | err = atmel_aes_crypt_dma_start(dd); | ||
978 | if (!err) | ||
979 | return; /* DMA started. Not finishing. */ | ||
980 | } | ||
981 | |||
982 | cpu_end: | ||
983 | atmel_aes_finish_req(dd, err); | ||
984 | atmel_aes_handle_queue(dd, NULL); | ||
985 | } | ||
986 | |||
987 | static irqreturn_t atmel_aes_irq(int irq, void *dev_id) | ||
988 | { | ||
989 | struct atmel_aes_dev *aes_dd = dev_id; | ||
990 | u32 reg; | ||
991 | |||
992 | reg = atmel_aes_read(aes_dd, AES_ISR); | ||
993 | if (reg & atmel_aes_read(aes_dd, AES_IMR)) { | ||
994 | atmel_aes_write(aes_dd, AES_IDR, reg); | ||
995 | if (AES_FLAGS_BUSY & aes_dd->flags) | ||
996 | tasklet_schedule(&aes_dd->done_task); | ||
997 | else | ||
998 | dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n"); | ||
999 | return IRQ_HANDLED; | ||
1000 | } | ||
1001 | |||
1002 | return IRQ_NONE; | ||
1003 | } | ||
1004 | |||
1005 | static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) | ||
1006 | { | ||
1007 | int i; | ||
1008 | |||
1009 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) | ||
1010 | crypto_unregister_alg(&aes_algs[i]); | ||
1011 | if (dd->hw_version >= 0x130) | ||
1012 | crypto_unregister_alg(&aes_cfb64_alg[0]); | ||
1013 | } | ||
1014 | |||
1015 | static int atmel_aes_register_algs(struct atmel_aes_dev *dd) | ||
1016 | { | ||
1017 | int err, i, j; | ||
1018 | |||
1019 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { | ||
1020 | INIT_LIST_HEAD(&aes_algs[i].cra_list); | ||
1021 | err = crypto_register_alg(&aes_algs[i]); | ||
1022 | if (err) | ||
1023 | goto err_aes_algs; | ||
1024 | } | ||
1025 | |||
1026 | atmel_aes_hw_version_init(dd); | ||
1027 | |||
1028 | if (dd->hw_version >= 0x130) { | ||
1029 | INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list); | ||
1030 | err = crypto_register_alg(&aes_cfb64_alg[0]); | ||
1031 | if (err) | ||
1032 | goto err_aes_cfb64_alg; | ||
1033 | } | ||
1034 | |||
1035 | return 0; | ||
1036 | |||
1037 | err_aes_cfb64_alg: | ||
1038 | i = ARRAY_SIZE(aes_algs); | ||
1039 | err_aes_algs: | ||
1040 | for (j = 0; j < i; j++) | ||
1041 | crypto_unregister_alg(&aes_algs[j]); | ||
1042 | |||
1043 | return err; | ||
1044 | } | ||
1045 | |||
1046 | static int __devinit atmel_aes_probe(struct platform_device *pdev) | ||
1047 | { | ||
1048 | struct atmel_aes_dev *aes_dd; | ||
1049 | struct aes_platform_data *pdata; | ||
1050 | struct device *dev = &pdev->dev; | ||
1051 | struct resource *aes_res; | ||
1052 | unsigned long aes_phys_size; | ||
1053 | int err; | ||
1054 | |||
1055 | pdata = pdev->dev.platform_data; | ||
1056 | if (!pdata) { | ||
1057 | err = -ENXIO; | ||
1058 | goto aes_dd_err; | ||
1059 | } | ||
1060 | |||
1061 | aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL); | ||
1062 | if (aes_dd == NULL) { | ||
1063 | dev_err(dev, "unable to alloc data struct.\n"); | ||
1064 | err = -ENOMEM; | ||
1065 | goto aes_dd_err; | ||
1066 | } | ||
1067 | |||
1068 | aes_dd->dev = dev; | ||
1069 | |||
1070 | platform_set_drvdata(pdev, aes_dd); | ||
1071 | |||
1072 | INIT_LIST_HEAD(&aes_dd->list); | ||
1073 | |||
1074 | tasklet_init(&aes_dd->done_task, atmel_aes_done_task, | ||
1075 | (unsigned long)aes_dd); | ||
1076 | tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task, | ||
1077 | (unsigned long)aes_dd); | ||
1078 | |||
1079 | crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); | ||
1080 | |||
1081 | aes_dd->irq = -1; | ||
1082 | |||
1083 | /* Get the base address */ | ||
1084 | aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1085 | if (!aes_res) { | ||
1086 | dev_err(dev, "no MEM resource info\n"); | ||
1087 | err = -ENODEV; | ||
1088 | goto res_err; | ||
1089 | } | ||
1090 | aes_dd->phys_base = aes_res->start; | ||
1091 | aes_phys_size = resource_size(aes_res); | ||
1092 | |||
1093 | /* Get the IRQ */ | ||
1094 | aes_dd->irq = platform_get_irq(pdev, 0); | ||
1095 | if (aes_dd->irq < 0) { | ||
1096 | dev_err(dev, "no IRQ resource info\n"); | ||
1097 | err = aes_dd->irq; | ||
1098 | goto aes_irq_err; | ||
1099 | } | ||
1100 | |||
1101 | err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes", | ||
1102 | aes_dd); | ||
1103 | if (err) { | ||
1104 | dev_err(dev, "unable to request aes irq.\n"); | ||
1105 | goto aes_irq_err; | ||
1106 | } | ||
1107 | |||
1108 | /* Initializing the clock */ | ||
1109 | aes_dd->iclk = clk_get(&pdev->dev, NULL); | ||
1110 | if (IS_ERR(aes_dd->iclk)) { | ||
1111 | dev_err(dev, "clock initialization failed.\n"); | ||
1112 | err = PTR_ERR(aes_dd->iclk); | ||
1113 | goto clk_err; | ||
1114 | } | ||
1115 | |||
1116 | aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size); | ||
1117 | if (!aes_dd->io_base) { | ||
1118 | dev_err(dev, "can't ioremap\n"); | ||
1119 | err = -ENOMEM; | ||
1120 | goto aes_io_err; | ||
1121 | } | ||
1122 | |||
1123 | err = atmel_aes_dma_init(aes_dd); | ||
1124 | if (err) | ||
1125 | goto err_aes_dma; | ||
1126 | |||
1127 | spin_lock(&atmel_aes.lock); | ||
1128 | list_add_tail(&aes_dd->list, &atmel_aes.dev_list); | ||
1129 | spin_unlock(&atmel_aes.lock); | ||
1130 | |||
1131 | err = atmel_aes_register_algs(aes_dd); | ||
1132 | if (err) | ||
1133 | goto err_algs; | ||
1134 | |||
1135 | dev_info(dev, "Atmel AES\n"); | ||
1136 | |||
1137 | return 0; | ||
1138 | |||
1139 | err_algs: | ||
1140 | spin_lock(&atmel_aes.lock); | ||
1141 | list_del(&aes_dd->list); | ||
1142 | spin_unlock(&atmel_aes.lock); | ||
1143 | atmel_aes_dma_cleanup(aes_dd); | ||
1144 | err_aes_dma: | ||
1145 | iounmap(aes_dd->io_base); | ||
1146 | aes_io_err: | ||
1147 | clk_put(aes_dd->iclk); | ||
1148 | clk_err: | ||
1149 | free_irq(aes_dd->irq, aes_dd); | ||
1150 | aes_irq_err: | ||
1151 | res_err: | ||
1152 | tasklet_kill(&aes_dd->done_task); | ||
1153 | tasklet_kill(&aes_dd->queue_task); | ||
1154 | kfree(aes_dd); | ||
1155 | aes_dd = NULL; | ||
1156 | aes_dd_err: | ||
1157 | dev_err(dev, "initialization failed.\n"); | ||
1158 | |||
1159 | return err; | ||
1160 | } | ||
1161 | |||
1162 | static int __devexit atmel_aes_remove(struct platform_device *pdev) | ||
1163 | { | ||
1164 | struct atmel_aes_dev *aes_dd; | ||
1165 | |||
1166 | aes_dd = platform_get_drvdata(pdev); | ||
1167 | if (!aes_dd) | ||
1168 | return -ENODEV; | ||
1169 | spin_lock(&atmel_aes.lock); | ||
1170 | list_del(&aes_dd->list); | ||
1171 | spin_unlock(&atmel_aes.lock); | ||
1172 | |||
1173 | atmel_aes_unregister_algs(aes_dd); | ||
1174 | |||
1175 | tasklet_kill(&aes_dd->done_task); | ||
1176 | tasklet_kill(&aes_dd->queue_task); | ||
1177 | |||
1178 | atmel_aes_dma_cleanup(aes_dd); | ||
1179 | |||
1180 | iounmap(aes_dd->io_base); | ||
1181 | |||
1182 | clk_put(aes_dd->iclk); | ||
1183 | |||
1184 | if (aes_dd->irq > 0) | ||
1185 | free_irq(aes_dd->irq, aes_dd); | ||
1186 | |||
1187 | kfree(aes_dd); | ||
1188 | aes_dd = NULL; | ||
1189 | |||
1190 | return 0; | ||
1191 | } | ||
1192 | |||
1193 | static struct platform_driver atmel_aes_driver = { | ||
1194 | .probe = atmel_aes_probe, | ||
1195 | .remove = __devexit_p(atmel_aes_remove), | ||
1196 | .driver = { | ||
1197 | .name = "atmel_aes", | ||
1198 | .owner = THIS_MODULE, | ||
1199 | }, | ||
1200 | }; | ||
1201 | |||
1202 | module_platform_driver(atmel_aes_driver); | ||
1203 | |||
1204 | MODULE_DESCRIPTION("Atmel AES hw acceleration support."); | ||
1205 | MODULE_LICENSE("GPL v2"); | ||
1206 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); | ||
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h new file mode 100644 index 000000000000..dc53a20d7da1 --- /dev/null +++ b/drivers/crypto/atmel-sha-regs.h | |||
@@ -0,0 +1,46 @@ | |||
1 | #ifndef __ATMEL_SHA_REGS_H__ | ||
2 | #define __ATMEL_SHA_REGS_H__ | ||
3 | |||
4 | #define SHA_REG_DIGEST(x) (0x80 + ((x) * 0x04)) | ||
5 | #define SHA_REG_DIN(x) (0x40 + ((x) * 0x04)) | ||
6 | |||
7 | #define SHA_CR 0x00 | ||
8 | #define SHA_CR_START (1 << 0) | ||
9 | #define SHA_CR_FIRST (1 << 4) | ||
10 | #define SHA_CR_SWRST (1 << 8) | ||
11 | |||
12 | #define SHA_MR 0x04 | ||
13 | #define SHA_MR_MODE_MASK (0x3 << 0) | ||
14 | #define SHA_MR_MODE_MANUAL 0x0 | ||
15 | #define SHA_MR_MODE_AUTO 0x1 | ||
16 | #define SHA_MR_MODE_PDC 0x2 | ||
17 | #define SHA_MR_DUALBUFF (1 << 3) | ||
18 | #define SHA_MR_PROCDLY (1 << 4) | ||
19 | #define SHA_MR_ALGO_SHA1 (0 << 8) | ||
20 | #define SHA_MR_ALGO_SHA256 (1 << 8) | ||
21 | |||
22 | #define SHA_IER 0x10 | ||
23 | #define SHA_IDR 0x14 | ||
24 | #define SHA_IMR 0x18 | ||
25 | #define SHA_ISR 0x1C | ||
26 | #define SHA_INT_DATARDY (1 << 0) | ||
27 | #define SHA_INT_ENDTX (1 << 1) | ||
28 | #define SHA_INT_TXBUFE (1 << 2) | ||
29 | #define SHA_INT_URAD (1 << 8) | ||
30 | #define SHA_ISR_URAT_MASK (0x7 << 12) | ||
31 | #define SHA_ISR_URAT_IDR (0x0 << 12) | ||
32 | #define SHA_ISR_URAT_ODR (0x1 << 12) | ||
33 | #define SHA_ISR_URAT_MR (0x2 << 12) | ||
34 | #define SHA_ISR_URAT_WO (0x5 << 12) | ||
35 | |||
36 | #define SHA_TPR 0x108 | ||
37 | #define SHA_TCR 0x10C | ||
38 | #define SHA_TNPR 0x118 | ||
39 | #define SHA_TNCR 0x11C | ||
40 | #define SHA_PTCR 0x120 | ||
41 | #define SHA_PTCR_TXTEN (1 << 8) | ||
42 | #define SHA_PTCR_TXTDIS (1 << 9) | ||
43 | #define SHA_PTSR 0x124 | ||
44 | #define SHA_PTSR_TXTEN (1 << 8) | ||
45 | |||
46 | #endif /* __ATMEL_SHA_REGS_H__ */ | ||
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c new file mode 100644 index 000000000000..f938b9d79b66 --- /dev/null +++ b/drivers/crypto/atmel-sha.c | |||
@@ -0,0 +1,1112 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for ATMEL SHA1/SHA256 HW acceleration. | ||
5 | * | ||
6 | * Copyright (c) 2012 Eukréa Electromatique - ATMEL | ||
7 | * Author: Nicolas Royer <nicolas@eukrea.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * Some ideas are from the omap-sham.c driver. | ||
14 | */ | ||
15 | |||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/hw_random.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | |||
26 | #include <linux/device.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/clk.h> | ||
33 | #include <linux/irq.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <linux/scatterlist.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/crypto.h> | ||
40 | #include <linux/cryptohash.h> | ||
41 | #include <crypto/scatterwalk.h> | ||
42 | #include <crypto/algapi.h> | ||
43 | #include <crypto/sha.h> | ||
44 | #include <crypto/hash.h> | ||
45 | #include <crypto/internal/hash.h> | ||
46 | #include "atmel-sha-regs.h" | ||
47 | |||
48 | /* SHA flags */ | ||
49 | #define SHA_FLAGS_BUSY BIT(0) | ||
50 | #define SHA_FLAGS_FINAL BIT(1) | ||
51 | #define SHA_FLAGS_DMA_ACTIVE BIT(2) | ||
52 | #define SHA_FLAGS_OUTPUT_READY BIT(3) | ||
53 | #define SHA_FLAGS_INIT BIT(4) | ||
54 | #define SHA_FLAGS_CPU BIT(5) | ||
55 | #define SHA_FLAGS_DMA_READY BIT(6) | ||
56 | |||
57 | #define SHA_FLAGS_FINUP BIT(16) | ||
58 | #define SHA_FLAGS_SG BIT(17) | ||
59 | #define SHA_FLAGS_SHA1 BIT(18) | ||
60 | #define SHA_FLAGS_SHA256 BIT(19) | ||
61 | #define SHA_FLAGS_ERROR BIT(20) | ||
62 | #define SHA_FLAGS_PAD BIT(21) | ||
63 | |||
64 | #define SHA_FLAGS_DUALBUFF BIT(24) | ||
65 | |||
66 | #define SHA_OP_UPDATE 1 | ||
67 | #define SHA_OP_FINAL 2 | ||
68 | |||
69 | #define SHA_BUFFER_LEN PAGE_SIZE | ||
70 | |||
71 | #define ATMEL_SHA_DMA_THRESHOLD 56 | ||
72 | |||
73 | |||
74 | struct atmel_sha_dev; | ||
75 | |||
76 | struct atmel_sha_reqctx { | ||
77 | struct atmel_sha_dev *dd; | ||
78 | unsigned long flags; | ||
79 | unsigned long op; | ||
80 | |||
81 | u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); | ||
82 | size_t digcnt; | ||
83 | size_t bufcnt; | ||
84 | size_t buflen; | ||
85 | dma_addr_t dma_addr; | ||
86 | |||
87 | /* walk state */ | ||
88 | struct scatterlist *sg; | ||
89 | unsigned int offset; /* offset in current sg */ | ||
90 | unsigned int total; /* total request */ | ||
91 | |||
92 | u8 buffer[0] __aligned(sizeof(u32)); | ||
93 | }; | ||
94 | |||
95 | struct atmel_sha_ctx { | ||
96 | struct atmel_sha_dev *dd; | ||
97 | |||
98 | unsigned long flags; | ||
99 | |||
100 | /* fallback stuff */ | ||
101 | struct crypto_shash *fallback; | ||
102 | |||
103 | }; | ||
104 | |||
105 | #define ATMEL_SHA_QUEUE_LENGTH 1 | ||
106 | |||
107 | struct atmel_sha_dev { | ||
108 | struct list_head list; | ||
109 | unsigned long phys_base; | ||
110 | struct device *dev; | ||
111 | struct clk *iclk; | ||
112 | int irq; | ||
113 | void __iomem *io_base; | ||
114 | |||
115 | spinlock_t lock; | ||
116 | int err; | ||
117 | struct tasklet_struct done_task; | ||
118 | |||
119 | unsigned long flags; | ||
120 | struct crypto_queue queue; | ||
121 | struct ahash_request *req; | ||
122 | }; | ||
123 | |||
124 | struct atmel_sha_drv { | ||
125 | struct list_head dev_list; | ||
126 | spinlock_t lock; | ||
127 | }; | ||
128 | |||
129 | static struct atmel_sha_drv atmel_sha = { | ||
130 | .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list), | ||
131 | .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock), | ||
132 | }; | ||
133 | |||
134 | static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset) | ||
135 | { | ||
136 | return readl_relaxed(dd->io_base + offset); | ||
137 | } | ||
138 | |||
139 | static inline void atmel_sha_write(struct atmel_sha_dev *dd, | ||
140 | u32 offset, u32 value) | ||
141 | { | ||
142 | writel_relaxed(value, dd->io_base + offset); | ||
143 | } | ||
144 | |||
145 | static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd) | ||
146 | { | ||
147 | atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF); | ||
148 | |||
149 | if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF) | ||
150 | dd->flags |= SHA_FLAGS_DUALBUFF; | ||
151 | } | ||
152 | |||
153 | static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx) | ||
154 | { | ||
155 | size_t count; | ||
156 | |||
157 | while ((ctx->bufcnt < ctx->buflen) && ctx->total) { | ||
158 | count = min(ctx->sg->length - ctx->offset, ctx->total); | ||
159 | count = min(count, ctx->buflen - ctx->bufcnt); | ||
160 | |||
161 | if (count <= 0) | ||
162 | break; | ||
163 | |||
164 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, | ||
165 | ctx->offset, count, 0); | ||
166 | |||
167 | ctx->bufcnt += count; | ||
168 | ctx->offset += count; | ||
169 | ctx->total -= count; | ||
170 | |||
171 | if (ctx->offset == ctx->sg->length) { | ||
172 | ctx->sg = sg_next(ctx->sg); | ||
173 | if (ctx->sg) | ||
174 | ctx->offset = 0; | ||
175 | else | ||
176 | ctx->total = 0; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * The purpose of this padding is to ensure that the padded message | ||
185 | * is a multiple of 512 bits. The bit "1" is appended at the end of | ||
186 | * the message followed by "padlen-1" zero bits. Then a 64-bit block | ||
187 | * equal to the message length in bits is appended. | ||
188 | * | ||
189 | * padlen is calculated as follows: | ||
190 | * - if message length < 56 bytes then padlen = 56 - message length | ||
191 | * - else padlen = 64 + 56 - message length | ||
192 | */ | ||
193 | static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length) | ||
194 | { | ||
195 | unsigned int index, padlen; | ||
196 | u64 bits; | ||
197 | u64 size; | ||
198 | |||
199 | bits = (ctx->bufcnt + ctx->digcnt + length) << 3; | ||
200 | size = cpu_to_be64(bits); | ||
201 | |||
202 | index = ctx->bufcnt & 0x3f; | ||
203 | padlen = (index < 56) ? (56 - index) : ((64+56) - index); | ||
204 | *(ctx->buffer + ctx->bufcnt) = 0x80; | ||
205 | memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); | ||
206 | memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8); | ||
207 | ctx->bufcnt += padlen + 8; | ||
208 | ctx->flags |= SHA_FLAGS_PAD; | ||
209 | } | ||
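Worked example: finalizing a 3-byte message gives index = 3 and padlen = 56 - 3 = 53, so the buffer ends up holding the 3 message bytes, the 0x80 byte, 52 zero bytes and the big-endian 64-bit bit-count 24 — exactly one 64-byte SHA block.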
210 | |||
211 | static int atmel_sha_init(struct ahash_request *req) | ||
212 | { | ||
213 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
214 | struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); | ||
215 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
216 | struct atmel_sha_dev *dd = NULL; | ||
217 | struct atmel_sha_dev *tmp; | ||
218 | |||
219 | spin_lock_bh(&atmel_sha.lock); | ||
220 | if (!tctx->dd) { | ||
221 | list_for_each_entry(tmp, &atmel_sha.dev_list, list) { | ||
222 | dd = tmp; | ||
223 | break; | ||
224 | } | ||
225 | tctx->dd = dd; | ||
226 | } else { | ||
227 | dd = tctx->dd; | ||
228 | } | ||
229 | |||
230 | spin_unlock_bh(&atmel_sha.lock); | ||
231 | |||
232 | ctx->dd = dd; | ||
233 | |||
234 | ctx->flags = 0; | ||
235 | |||
236 | dev_dbg(dd->dev, "init: digest size: %d\n", | ||
237 | crypto_ahash_digestsize(tfm)); | ||
238 | |||
239 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | ||
240 | ctx->flags |= SHA_FLAGS_SHA1; | ||
241 | else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE) | ||
242 | ctx->flags |= SHA_FLAGS_SHA256; | ||
243 | |||
244 | ctx->bufcnt = 0; | ||
245 | ctx->digcnt = 0; | ||
246 | ctx->buflen = SHA_BUFFER_LEN; | ||
247 | |||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) | ||
252 | { | ||
253 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
254 | u32 valcr = 0, valmr = SHA_MR_MODE_AUTO; | ||
255 | |||
256 | if (likely(dma)) { | ||
257 | atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE); | ||
258 | valmr = SHA_MR_MODE_PDC; | ||
259 | if (dd->flags & SHA_FLAGS_DUALBUFF) | ||
260 | valmr |= SHA_MR_DUALBUFF; | ||
261 | } else { | ||
262 | atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); | ||
263 | } | ||
264 | |||
265 | if (ctx->flags & SHA_FLAGS_SHA256) | ||
266 | valmr |= SHA_MR_ALGO_SHA256; | ||
267 | |||
268 | /* Setting CR_FIRST only for the first iteration */ | ||
269 | if (!ctx->digcnt) | ||
270 | valcr = SHA_CR_FIRST; | ||
271 | |||
272 | atmel_sha_write(dd, SHA_CR, valcr); | ||
273 | atmel_sha_write(dd, SHA_MR, valmr); | ||
274 | } | ||
275 | |||
276 | static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf, | ||
277 | size_t length, int final) | ||
278 | { | ||
279 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
280 | int count, len32; | ||
281 | const u32 *buffer = (const u32 *)buf; | ||
282 | |||
283 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", | ||
284 | ctx->digcnt, length, final); | ||
285 | |||
286 | atmel_sha_write_ctrl(dd, 0); | ||
287 | |||
288 | /* digcnt must be updated before the transfer starts (the completion path reads it) */ | ||
289 | ctx->digcnt += length; | ||
290 | |||
291 | if (final) | ||
292 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | ||
293 | |||
294 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
295 | |||
296 | dd->flags |= SHA_FLAGS_CPU; | ||
297 | |||
298 | for (count = 0; count < len32; count++) | ||
299 | atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]); | ||
300 | |||
301 | return -EINPROGRESS; | ||
302 | } | ||
303 | |||
304 | static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | ||
305 | size_t length1, dma_addr_t dma_addr2, size_t length2, int final) | ||
306 | { | ||
307 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
308 | int len32; | ||
309 | |||
310 | dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n", | ||
311 | ctx->digcnt, length1, final); | ||
312 | |||
313 | len32 = DIV_ROUND_UP(length1, sizeof(u32)); | ||
314 | atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS); | ||
315 | atmel_sha_write(dd, SHA_TPR, dma_addr1); | ||
316 | atmel_sha_write(dd, SHA_TCR, len32); | ||
317 | |||
318 | len32 = DIV_ROUND_UP(length2, sizeof(u32)); | ||
319 | atmel_sha_write(dd, SHA_TNPR, dma_addr2); | ||
320 | atmel_sha_write(dd, SHA_TNCR, len32); | ||
321 | |||
322 | atmel_sha_write_ctrl(dd, 1); | ||
323 | |||
324 | /* digcnt must be updated before the transfer starts (the completion path reads it) */ | ||
325 | ctx->digcnt += length1; | ||
326 | |||
327 | if (final) | ||
328 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | ||
329 | |||
330 | dd->flags |= SHA_FLAGS_DMA_ACTIVE; | ||
331 | |||
332 | /* Start DMA transfer */ | ||
333 | atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN); | ||
334 | |||
335 | return -EINPROGRESS; | ||
336 | } | ||
337 | |||
338 | static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) | ||
339 | { | ||
340 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
341 | int bufcnt; | ||
342 | |||
343 | atmel_sha_append_sg(ctx); | ||
344 | atmel_sha_fill_padding(ctx, 0); | ||
345 | |||
346 | bufcnt = ctx->bufcnt; | ||
347 | ctx->bufcnt = 0; | ||
348 | |||
349 | return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); | ||
350 | } | ||
351 | |||
352 | static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd, | ||
353 | struct atmel_sha_reqctx *ctx, | ||
354 | size_t length, int final) | ||
355 | { | ||
356 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, | ||
357 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | ||
358 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | ||
359 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen + | ||
360 | SHA1_BLOCK_SIZE); | ||
361 | return -EINVAL; | ||
362 | } | ||
363 | |||
364 | ctx->flags &= ~SHA_FLAGS_SG; | ||
365 | |||
366 | /* atmel_sha_xmit_pdc() cannot fail, so no unmap is needed on error */ | ||
367 | return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final); | ||
368 | } | ||
369 | |||
370 | static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) | ||
371 | { | ||
372 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
373 | unsigned int final; | ||
374 | size_t count; | ||
375 | |||
376 | atmel_sha_append_sg(ctx); | ||
377 | |||
378 | final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; | ||
379 | |||
380 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", | ||
381 | ctx->bufcnt, ctx->digcnt, final); | ||
382 | |||
383 | if (final) | ||
384 | atmel_sha_fill_padding(ctx, 0); | ||
385 | |||
386 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { | ||
387 | count = ctx->bufcnt; | ||
388 | ctx->bufcnt = 0; | ||
389 | return atmel_sha_xmit_dma_map(dd, ctx, count, final); | ||
390 | } | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | ||
396 | { | ||
397 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
398 | unsigned int length, final, tail; | ||
399 | struct scatterlist *sg; | ||
400 | unsigned int count; | ||
401 | |||
402 | if (!ctx->total) | ||
403 | return 0; | ||
404 | |||
405 | if (ctx->bufcnt || ctx->offset) | ||
406 | return atmel_sha_update_dma_slow(dd); | ||
407 | |||
408 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", | ||
409 | ctx->digcnt, ctx->bufcnt, ctx->total); | ||
410 | |||
411 | sg = ctx->sg; | ||
412 | |||
413 | if (!IS_ALIGNED(sg->offset, sizeof(u32))) | ||
414 | return atmel_sha_update_dma_slow(dd); | ||
415 | |||
416 | if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE)) | ||
417 | /* size is not SHA1_BLOCK_SIZE aligned */ | ||
418 | return atmel_sha_update_dma_slow(dd); | ||
419 | |||
420 | length = min(ctx->total, sg->length); | ||
421 | |||
422 | if (sg_is_last(sg)) { | ||
423 | if (!(ctx->flags & SHA_FLAGS_FINUP)) { | ||
424 | /* not last sg must be SHA1_BLOCK_SIZE aligned */ | ||
425 | tail = length & (SHA1_BLOCK_SIZE - 1); | ||
426 | length -= tail; | ||
427 | if (length == 0) { | ||
428 | /* offset where to start slow */ | ||
429 | ctx->offset = length; | ||
430 | return atmel_sha_update_dma_slow(dd); | ||
431 | } | ||
432 | } | ||
433 | } | ||
434 | |||
435 | ctx->total -= length; | ||
436 | ctx->offset = length; /* offset where to start slow */ | ||
437 | |||
438 | final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; | ||
439 | |||
440 | /* Add padding */ | ||
441 | if (final) { | ||
442 | tail = length & (SHA1_BLOCK_SIZE - 1); | ||
443 | length -= tail; | ||
444 | ctx->total += tail; | ||
445 | ctx->offset = length; /* offset where to start slow */ | ||
446 | |||
447 | sg = ctx->sg; | ||
448 | atmel_sha_append_sg(ctx); | ||
449 | |||
450 | atmel_sha_fill_padding(ctx, length); | ||
451 | |||
452 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, | ||
453 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | ||
454 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | ||
455 | dev_err(dd->dev, "dma %u bytes error\n", | ||
456 | ctx->buflen + SHA1_BLOCK_SIZE); | ||
457 | return -EINVAL; | ||
458 | } | ||
459 | |||
460 | if (length == 0) { | ||
461 | ctx->flags &= ~SHA_FLAGS_SG; | ||
462 | count = ctx->bufcnt; | ||
463 | ctx->bufcnt = 0; | ||
464 | return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0, | ||
465 | 0, final); | ||
466 | } else { | ||
467 | ctx->sg = sg; | ||
468 | if (!dma_map_sg(dd->dev, ctx->sg, 1, | ||
469 | DMA_TO_DEVICE)) { | ||
470 | dev_err(dd->dev, "dma_map_sg error\n"); | ||
471 | return -EINVAL; | ||
472 | } | ||
473 | |||
474 | ctx->flags |= SHA_FLAGS_SG; | ||
475 | |||
476 | count = ctx->bufcnt; | ||
477 | ctx->bufcnt = 0; | ||
478 | return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), | ||
479 | length, ctx->dma_addr, count, final); | ||
480 | } | ||
481 | } | ||
482 | |||
483 | if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { | ||
484 | dev_err(dd->dev, "dma_map_sg error\n"); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
488 | ctx->flags |= SHA_FLAGS_SG; | ||
489 | |||
490 | /* atmel_sha_xmit_pdc() cannot fail, so no unmap is needed on error */ | ||
491 | return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0, | ||
492 | 0, final); | ||
493 | } | ||
494 | |||
495 | static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) | ||
496 | { | ||
497 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
498 | |||
499 | if (ctx->flags & SHA_FLAGS_SG) { | ||
500 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | ||
501 | if (ctx->sg->length == ctx->offset) { | ||
502 | ctx->sg = sg_next(ctx->sg); | ||
503 | if (ctx->sg) | ||
504 | ctx->offset = 0; | ||
505 | } | ||
506 | if (ctx->flags & SHA_FLAGS_PAD) | ||
507 | dma_unmap_single(dd->dev, ctx->dma_addr, | ||
508 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | ||
509 | } else { | ||
510 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + | ||
511 | SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | ||
512 | } | ||
513 | |||
514 | return 0; | ||
515 | } | ||
516 | |||
517 | static int atmel_sha_update_req(struct atmel_sha_dev *dd) | ||
518 | { | ||
519 | struct ahash_request *req = dd->req; | ||
520 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
521 | int err; | ||
522 | |||
523 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", | ||
524 | ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0); | ||
525 | |||
526 | if (ctx->flags & SHA_FLAGS_CPU) | ||
527 | err = atmel_sha_update_cpu(dd); | ||
528 | else | ||
529 | err = atmel_sha_update_dma_start(dd); | ||
530 | |||
531 | /* wait for DMA completion before more data can be taken */ | ||
532 | dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", | ||
533 | err, ctx->digcnt); | ||
534 | |||
535 | return err; | ||
536 | } | ||
537 | |||
538 | static int atmel_sha_final_req(struct atmel_sha_dev *dd) | ||
539 | { | ||
540 | struct ahash_request *req = dd->req; | ||
541 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
542 | int err = 0; | ||
543 | int count; | ||
544 | |||
545 | if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { | ||
546 | atmel_sha_fill_padding(ctx, 0); | ||
547 | count = ctx->bufcnt; | ||
548 | ctx->bufcnt = 0; | ||
549 | err = atmel_sha_xmit_dma_map(dd, ctx, count, 1); | ||
550 | } else {
551 | /* faster to handle last block with cpu */ | ||
553 | atmel_sha_fill_padding(ctx, 0); | ||
554 | count = ctx->bufcnt; | ||
555 | ctx->bufcnt = 0; | ||
556 | err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); | ||
557 | } | ||
558 | |||
559 | dev_dbg(dd->dev, "final_req: err: %d\n", err); | ||
560 | |||
561 | return err; | ||
562 | } | ||
563 | |||
564 | static void atmel_sha_copy_hash(struct ahash_request *req) | ||
565 | { | ||
566 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
567 | u32 *hash = (u32 *)ctx->digest; | ||
568 | int i; | ||
569 | |||
570 | if (likely(ctx->flags & SHA_FLAGS_SHA1)) | ||
571 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | ||
572 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | ||
573 | else | ||
574 | for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) | ||
575 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | ||
576 | } | ||
577 | |||
578 | static void atmel_sha_copy_ready_hash(struct ahash_request *req) | ||
579 | { | ||
580 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
581 | |||
582 | if (!req->result) | ||
583 | return; | ||
584 | |||
585 | if (likely(ctx->flags & SHA_FLAGS_SHA1)) | ||
586 | memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); | ||
587 | else | ||
588 | memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); | ||
589 | } | ||
590 | |||
591 | static int atmel_sha_finish(struct ahash_request *req) | ||
592 | { | ||
593 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
594 | struct atmel_sha_dev *dd = ctx->dd; | ||
595 | int err = 0; | ||
596 | |||
597 | if (ctx->digcnt) | ||
598 | atmel_sha_copy_ready_hash(req); | ||
599 | |||
600 | dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, | ||
601 | ctx->bufcnt); | ||
602 | |||
603 | return err; | ||
604 | } | ||
605 | |||
606 | static void atmel_sha_finish_req(struct ahash_request *req, int err) | ||
607 | { | ||
608 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
609 | struct atmel_sha_dev *dd = ctx->dd; | ||
610 | |||
611 | if (!err) { | ||
612 | atmel_sha_copy_hash(req); | ||
613 | if (SHA_FLAGS_FINAL & dd->flags) | ||
614 | err = atmel_sha_finish(req); | ||
615 | } else { | ||
616 | ctx->flags |= SHA_FLAGS_ERROR; | ||
617 | } | ||
618 | |||
619 | /* atomic operation is not needed here */ | ||
620 | dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | | ||
621 | SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); | ||
622 | |||
623 | clk_disable_unprepare(dd->iclk); | ||
624 | |||
625 | if (req->base.complete) | ||
626 | req->base.complete(&req->base, err); | ||
627 | |||
628 | /* handle new request */ | ||
629 | tasklet_schedule(&dd->done_task); | ||
630 | } | ||
631 | |||
632 | static int atmel_sha_hw_init(struct atmel_sha_dev *dd) | ||
633 | { | ||
634 | clk_prepare_enable(dd->iclk); | ||
635 | |||
636 | if (!(SHA_FLAGS_INIT & dd->flags)) { | ||
637 | atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); | ||
638 | atmel_sha_dualbuff_test(dd); | ||
639 | dd->flags |= SHA_FLAGS_INIT; | ||
640 | dd->err = 0; | ||
641 | } | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, | ||
647 | struct ahash_request *req) | ||
648 | { | ||
649 | struct crypto_async_request *async_req, *backlog; | ||
650 | struct atmel_sha_reqctx *ctx; | ||
651 | unsigned long flags; | ||
652 | int err = 0, ret = 0; | ||
653 | |||
654 | spin_lock_irqsave(&dd->lock, flags); | ||
655 | if (req) | ||
656 | ret = ahash_enqueue_request(&dd->queue, req); | ||
657 | |||
658 | if (SHA_FLAGS_BUSY & dd->flags) { | ||
659 | spin_unlock_irqrestore(&dd->lock, flags); | ||
660 | return ret; | ||
661 | } | ||
662 | |||
663 | backlog = crypto_get_backlog(&dd->queue); | ||
664 | async_req = crypto_dequeue_request(&dd->queue); | ||
665 | if (async_req) | ||
666 | dd->flags |= SHA_FLAGS_BUSY; | ||
667 | |||
668 | spin_unlock_irqrestore(&dd->lock, flags); | ||
669 | |||
670 | if (!async_req) | ||
671 | return ret; | ||
672 | |||
673 | if (backlog) | ||
674 | backlog->complete(backlog, -EINPROGRESS); | ||
675 | |||
676 | req = ahash_request_cast(async_req); | ||
677 | dd->req = req; | ||
678 | ctx = ahash_request_ctx(req); | ||
679 | |||
680 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | ||
681 | ctx->op, req->nbytes); | ||
682 | |||
683 | err = atmel_sha_hw_init(dd); | ||
684 | |||
685 | if (err) | ||
686 | goto err1; | ||
687 | |||
688 | if (ctx->op == SHA_OP_UPDATE) { | ||
689 | err = atmel_sha_update_req(dd); | ||
690 | if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) { | ||
691 | /* no final() after finup() */ | ||
692 | err = atmel_sha_final_req(dd); | ||
693 | } | ||
694 | } else if (ctx->op == SHA_OP_FINAL) { | ||
695 | err = atmel_sha_final_req(dd); | ||
696 | } | ||
697 | |||
698 | err1: | ||
699 | if (err != -EINPROGRESS) | ||
700 | /* done_task will not finish it, so do it here */ | ||
701 | atmel_sha_finish_req(req, err); | ||
702 | |||
703 | dev_dbg(dd->dev, "exit, err: %d\n", err); | ||
704 | |||
705 | return ret; | ||
706 | } | ||
707 | |||
708 | static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op) | ||
709 | { | ||
710 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
711 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
712 | struct atmel_sha_dev *dd = tctx->dd; | ||
713 | |||
714 | ctx->op = op; | ||
715 | |||
716 | return atmel_sha_handle_queue(dd, req); | ||
717 | } | ||
718 | |||
719 | static int atmel_sha_update(struct ahash_request *req) | ||
720 | { | ||
721 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
722 | |||
723 | if (!req->nbytes) | ||
724 | return 0; | ||
725 | |||
726 | ctx->total = req->nbytes; | ||
727 | ctx->sg = req->src; | ||
728 | ctx->offset = 0; | ||
729 | |||
730 | if (ctx->flags & SHA_FLAGS_FINUP) { | ||
731 | if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) | ||
732 | /* faster to use CPU for short transfers */ | ||
733 | ctx->flags |= SHA_FLAGS_CPU; | ||
734 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { | ||
735 | atmel_sha_append_sg(ctx); | ||
736 | return 0; | ||
737 | } | ||
738 | return atmel_sha_enqueue(req, SHA_OP_UPDATE); | ||
739 | } | ||
740 | |||
741 | static int atmel_sha_final(struct ahash_request *req) | ||
742 | { | ||
743 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
744 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
745 | struct atmel_sha_dev *dd = tctx->dd; | ||
746 | |||
747 | int err = 0; | ||
748 | |||
749 | ctx->flags |= SHA_FLAGS_FINUP; | ||
750 | |||
751 | if (ctx->flags & SHA_FLAGS_ERROR) | ||
752 | return 0; /* uncompleted hash is not needed */ | ||
753 | |||
754 | if (ctx->bufcnt) { | ||
755 | return atmel_sha_enqueue(req, SHA_OP_FINAL); | ||
756 | } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */ | ||
757 | err = atmel_sha_hw_init(dd); | ||
758 | if (err) | ||
759 | goto err1; | ||
760 | |||
761 | dd->flags |= SHA_FLAGS_BUSY; | ||
762 | err = atmel_sha_final_req(dd); | ||
763 | } else { | ||
764 | /* copy ready hash (+ finalize hmac) */ | ||
765 | return atmel_sha_finish(req); | ||
766 | } | ||
767 | |||
768 | err1: | ||
769 | if (err != -EINPROGRESS) | ||
770 | /* done_task will not finish it, so do it here */ | ||
771 | atmel_sha_finish_req(req, err); | ||
772 | |||
773 | return err; | ||
774 | } | ||
775 | |||
776 | static int atmel_sha_finup(struct ahash_request *req) | ||
777 | { | ||
778 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
779 | int err1, err2; | ||
780 | |||
781 | ctx->flags |= SHA_FLAGS_FINUP; | ||
782 | |||
783 | err1 = atmel_sha_update(req); | ||
784 | if (err1 == -EINPROGRESS || err1 == -EBUSY) | ||
785 | return err1; | ||
786 | |||
787 | /* | ||
788 | * final() always has to be called to clean up resources, | ||
789 | * even if update() failed, except for EINPROGRESS | ||
790 | */ | ||
791 | err2 = atmel_sha_final(req); | ||
792 | |||
793 | return err1 ?: err2; | ||
794 | } | ||
795 | |||
796 | static int atmel_sha_digest(struct ahash_request *req) | ||
797 | { | ||
798 | return atmel_sha_init(req) ?: atmel_sha_finup(req); | ||
799 | } | ||
800 | |||
801 | static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | ||
802 | { | ||
803 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm); | ||
804 | const char *alg_name = crypto_tfm_alg_name(tfm); | ||
805 | |||
806 | /* Allocate a fallback and abort if it failed. */ | ||
807 | tctx->fallback = crypto_alloc_shash(alg_name, 0, | ||
808 | CRYPTO_ALG_NEED_FALLBACK); | ||
809 | if (IS_ERR(tctx->fallback)) { | ||
810 | pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n", | ||
811 | alg_name); | ||
812 | return PTR_ERR(tctx->fallback); | ||
813 | } | ||
814 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
815 | sizeof(struct atmel_sha_reqctx) + | ||
816 | SHA_BUFFER_LEN + SHA256_BLOCK_SIZE); | ||
817 | |||
818 | return 0; | ||
819 | } | ||
820 | |||
821 | static int atmel_sha_cra_init(struct crypto_tfm *tfm) | ||
822 | { | ||
823 | return atmel_sha_cra_init_alg(tfm, NULL); | ||
824 | } | ||
825 | |||
826 | static void atmel_sha_cra_exit(struct crypto_tfm *tfm) | ||
827 | { | ||
828 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm); | ||
829 | |||
830 | crypto_free_shash(tctx->fallback); | ||
831 | tctx->fallback = NULL; | ||
832 | } | ||
833 | |||
834 | static struct ahash_alg sha_algs[] = { | ||
835 | { | ||
836 | .init = atmel_sha_init, | ||
837 | .update = atmel_sha_update, | ||
838 | .final = atmel_sha_final, | ||
839 | .finup = atmel_sha_finup, | ||
840 | .digest = atmel_sha_digest, | ||
841 | .halg = { | ||
842 | .digestsize = SHA1_DIGEST_SIZE, | ||
843 | .base = { | ||
844 | .cra_name = "sha1", | ||
845 | .cra_driver_name = "atmel-sha1", | ||
846 | .cra_priority = 100, | ||
847 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
848 | CRYPTO_ALG_NEED_FALLBACK, | ||
849 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
850 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | ||
851 | .cra_alignmask = 0, | ||
852 | .cra_module = THIS_MODULE, | ||
853 | .cra_init = atmel_sha_cra_init, | ||
854 | .cra_exit = atmel_sha_cra_exit, | ||
855 | } | ||
856 | } | ||
857 | }, | ||
858 | { | ||
859 | .init = atmel_sha_init, | ||
860 | .update = atmel_sha_update, | ||
861 | .final = atmel_sha_final, | ||
862 | .finup = atmel_sha_finup, | ||
863 | .digest = atmel_sha_digest, | ||
864 | .halg = { | ||
865 | .digestsize = SHA256_DIGEST_SIZE, | ||
866 | .base = { | ||
867 | .cra_name = "sha256", | ||
868 | .cra_driver_name = "atmel-sha256", | ||
869 | .cra_priority = 100, | ||
870 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
871 | CRYPTO_ALG_NEED_FALLBACK, | ||
872 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
873 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | ||
874 | .cra_alignmask = 0, | ||
875 | .cra_module = THIS_MODULE, | ||
876 | .cra_init = atmel_sha_cra_init, | ||
877 | .cra_exit = atmel_sha_cra_exit, | ||
878 | } | ||
879 | } | ||
880 | }, | ||
881 | }; | ||
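With these two entries registered, kernel users reach the accelerator through the generic ahash API and never call the driver directly. A hedged sketch of such a caller (kernel module context; the function name is illustrative and error handling is trimmed):

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Digest one linear buffer with whichever "sha1" provider wins the
 * priority selection (atmel-sha1 once this driver has registered). */
static int sha1_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* This driver is CRYPTO_ALG_ASYNC: a real caller must set a
	 * completion callback and wait on -EINPROGRESS/-EBUSY. */
	err = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}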
882 | |||
883 | static void atmel_sha_done_task(unsigned long data) | ||
884 | { | ||
885 | struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; | ||
886 | int err = 0; | ||
887 | |||
888 | if (!(SHA_FLAGS_BUSY & dd->flags)) { | ||
889 | atmel_sha_handle_queue(dd, NULL); | ||
890 | return; | ||
891 | } | ||
892 | |||
893 | if (SHA_FLAGS_CPU & dd->flags) { | ||
894 | if (SHA_FLAGS_OUTPUT_READY & dd->flags) { | ||
895 | dd->flags &= ~SHA_FLAGS_OUTPUT_READY; | ||
896 | goto finish; | ||
897 | } | ||
898 | } else if (SHA_FLAGS_DMA_READY & dd->flags) { | ||
899 | if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { | ||
900 | dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; | ||
901 | atmel_sha_update_dma_stop(dd); | ||
902 | if (dd->err) { | ||
903 | err = dd->err; | ||
904 | goto finish; | ||
905 | } | ||
906 | } | ||
907 | if (SHA_FLAGS_OUTPUT_READY & dd->flags) { | ||
908 | /* hash or semi-hash ready */ | ||
909 | dd->flags &= ~(SHA_FLAGS_DMA_READY | | ||
910 | SHA_FLAGS_OUTPUT_READY); | ||
911 | err = atmel_sha_update_dma_start(dd); | ||
912 | if (err != -EINPROGRESS) | ||
913 | goto finish; | ||
914 | } | ||
915 | } | ||
916 | return; | ||
917 | |||
918 | finish: | ||
919 | /* finish current request */ | ||
920 | atmel_sha_finish_req(dd->req, err); | ||
921 | } | ||
922 | |||
923 | static irqreturn_t atmel_sha_irq(int irq, void *dev_id) | ||
924 | { | ||
925 | struct atmel_sha_dev *sha_dd = dev_id; | ||
926 | u32 reg; | ||
927 | |||
928 | reg = atmel_sha_read(sha_dd, SHA_ISR); | ||
929 | if (reg & atmel_sha_read(sha_dd, SHA_IMR)) { | ||
930 | atmel_sha_write(sha_dd, SHA_IDR, reg); | ||
931 | if (SHA_FLAGS_BUSY & sha_dd->flags) { | ||
932 | sha_dd->flags |= SHA_FLAGS_OUTPUT_READY; | ||
933 | if (!(SHA_FLAGS_CPU & sha_dd->flags)) | ||
934 | sha_dd->flags |= SHA_FLAGS_DMA_READY; | ||
935 | tasklet_schedule(&sha_dd->done_task); | ||
936 | } else { | ||
937 | dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n"); | ||
938 | } | ||
939 | return IRQ_HANDLED; | ||
940 | } | ||
941 | |||
942 | return IRQ_NONE; | ||
943 | } | ||
944 | |||
945 | static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) | ||
946 | { | ||
947 | int i; | ||
948 | |||
949 | for (i = 0; i < ARRAY_SIZE(sha_algs); i++) | ||
950 | crypto_unregister_ahash(&sha_algs[i]); | ||
951 | } | ||
952 | |||
953 | static int atmel_sha_register_algs(struct atmel_sha_dev *dd) | ||
954 | { | ||
955 | int err, i, j; | ||
956 | |||
957 | for (i = 0; i < ARRAY_SIZE(sha_algs); i++) { | ||
958 | err = crypto_register_ahash(&sha_algs[i]); | ||
959 | if (err) | ||
960 | goto err_sha_algs; | ||
961 | } | ||
962 | |||
963 | return 0; | ||
964 | |||
965 | err_sha_algs: | ||
966 | for (j = 0; j < i; j++) | ||
967 | crypto_unregister_ahash(&sha_algs[j]); | ||
968 | |||
969 | return err; | ||
970 | } | ||
971 | |||
972 | static int __devinit atmel_sha_probe(struct platform_device *pdev) | ||
973 | { | ||
974 | struct atmel_sha_dev *sha_dd; | ||
975 | struct device *dev = &pdev->dev; | ||
976 | struct resource *sha_res; | ||
977 | unsigned long sha_phys_size; | ||
978 | int err; | ||
979 | |||
980 | sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL); | ||
981 | if (sha_dd == NULL) { | ||
982 | dev_err(dev, "unable to alloc data struct.\n"); | ||
983 | err = -ENOMEM; | ||
984 | goto sha_dd_err; | ||
985 | } | ||
986 | |||
987 | sha_dd->dev = dev; | ||
988 | |||
989 | platform_set_drvdata(pdev, sha_dd); | ||
990 | |||
991 | INIT_LIST_HEAD(&sha_dd->list); | ||
992 | |||
993 | tasklet_init(&sha_dd->done_task, atmel_sha_done_task, | ||
994 | (unsigned long)sha_dd); | ||
995 | |||
996 | crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); | ||
997 | |||
998 | sha_dd->irq = -1; | ||
999 | |||
1000 | /* Get the base address */ | ||
1001 | sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1002 | if (!sha_res) { | ||
1003 | dev_err(dev, "no MEM resource info\n"); | ||
1004 | err = -ENODEV; | ||
1005 | goto res_err; | ||
1006 | } | ||
1007 | sha_dd->phys_base = sha_res->start; | ||
1008 | sha_phys_size = resource_size(sha_res); | ||
1009 | |||
1010 | /* Get the IRQ */ | ||
1011 | sha_dd->irq = platform_get_irq(pdev, 0); | ||
1012 | if (sha_dd->irq < 0) { | ||
1013 | dev_err(dev, "no IRQ resource info\n"); | ||
1014 | err = sha_dd->irq; | ||
1015 | goto res_err; | ||
1016 | } | ||
1017 | |||
1018 | err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha", | ||
1019 | sha_dd); | ||
1020 | if (err) { | ||
1021 | dev_err(dev, "unable to request sha irq.\n"); | ||
1022 | goto res_err; | ||
1023 | } | ||
1024 | |||
1025 | /* Initializing the clock */ | ||
1026 | sha_dd->iclk = clk_get(&pdev->dev, NULL); | ||
1027 | if (IS_ERR(sha_dd->iclk)) { | ||
1028 | dev_err(dev, "clock intialization failed.\n"); | ||
1029 | err = PTR_ERR(sha_dd->iclk); | ||
1030 | goto clk_err; | ||
1031 | } | ||
1032 | |||
1033 | sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size); | ||
1034 | if (!sha_dd->io_base) { | ||
1035 | dev_err(dev, "can't ioremap\n"); | ||
1036 | err = -ENOMEM; | ||
1037 | goto sha_io_err; | ||
1038 | } | ||
1039 | |||
1040 | spin_lock(&atmel_sha.lock); | ||
1041 | list_add_tail(&sha_dd->list, &atmel_sha.dev_list); | ||
1042 | spin_unlock(&atmel_sha.lock); | ||
1043 | |||
1044 | err = atmel_sha_register_algs(sha_dd); | ||
1045 | if (err) | ||
1046 | goto err_algs; | ||
1047 | |||
1048 | dev_info(dev, "Atmel SHA1/SHA256\n"); | ||
1049 | |||
1050 | return 0; | ||
1051 | |||
1052 | err_algs: | ||
1053 | spin_lock(&atmel_sha.lock); | ||
1054 | list_del(&sha_dd->list); | ||
1055 | spin_unlock(&atmel_sha.lock); | ||
1056 | iounmap(sha_dd->io_base); | ||
1057 | sha_io_err: | ||
1058 | clk_put(sha_dd->iclk); | ||
1059 | clk_err: | ||
1060 | free_irq(sha_dd->irq, sha_dd); | ||
1061 | res_err: | ||
1062 | tasklet_kill(&sha_dd->done_task); | ||
1063 | kfree(sha_dd); | ||
1064 | sha_dd = NULL; | ||
1065 | sha_dd_err: | ||
1066 | dev_err(dev, "initialization failed.\n"); | ||
1067 | |||
1068 | return err; | ||
1069 | } | ||
1070 | |||
1071 | static int __devexit atmel_sha_remove(struct platform_device *pdev) | ||
1072 | { | ||
1073 | struct atmel_sha_dev *sha_dd; | ||
1074 | |||
1075 | sha_dd = platform_get_drvdata(pdev); | ||
1076 | if (!sha_dd) | ||
1077 | return -ENODEV; | ||
1078 | spin_lock(&atmel_sha.lock); | ||
1079 | list_del(&sha_dd->list); | ||
1080 | spin_unlock(&atmel_sha.lock); | ||
1081 | |||
1082 | atmel_sha_unregister_algs(sha_dd); | ||
1083 | |||
1084 | tasklet_kill(&sha_dd->done_task); | ||
1085 | |||
1086 | iounmap(sha_dd->io_base); | ||
1087 | |||
1088 | clk_put(sha_dd->iclk); | ||
1089 | |||
1090 | if (sha_dd->irq >= 0) | ||
1091 | free_irq(sha_dd->irq, sha_dd); | ||
1092 | |||
1093 | kfree(sha_dd); | ||
1094 | sha_dd = NULL; | ||
1095 | |||
1096 | return 0; | ||
1097 | } | ||
1098 | |||
1099 | static struct platform_driver atmel_sha_driver = { | ||
1100 | .probe = atmel_sha_probe, | ||
1101 | .remove = __devexit_p(atmel_sha_remove), | ||
1102 | .driver = { | ||
1103 | .name = "atmel_sha", | ||
1104 | .owner = THIS_MODULE, | ||
1105 | }, | ||
1106 | }; | ||
1107 | |||
1108 | module_platform_driver(atmel_sha_driver); | ||
1109 | |||
1110 | MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support."); | ||
1111 | MODULE_LICENSE("GPL v2"); | ||
1112 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); | ||
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h new file mode 100644 index 000000000000..5ac2a900d80c --- /dev/null +++ b/drivers/crypto/atmel-tdes-regs.h | |||
@@ -0,0 +1,89 @@ | |||
1 | #ifndef __ATMEL_TDES_REGS_H__ | ||
2 | #define __ATMEL_TDES_REGS_H__ | ||
3 | |||
4 | #define TDES_CR 0x00 | ||
5 | #define TDES_CR_START (1 << 0) | ||
6 | #define TDES_CR_SWRST (1 << 8) | ||
7 | #define TDES_CR_LOADSEED (1 << 16) | ||
8 | |||
9 | #define TDES_MR 0x04 | ||
10 | #define TDES_MR_CYPHER_DEC (0 << 0) | ||
11 | #define TDES_MR_CYPHER_ENC (1 << 0) | ||
12 | #define TDES_MR_TDESMOD_MASK (0x3 << 1) | ||
13 | #define TDES_MR_TDESMOD_DES (0x0 << 1) | ||
14 | #define TDES_MR_TDESMOD_TDES (0x1 << 1) | ||
15 | #define TDES_MR_TDESMOD_XTEA (0x2 << 1) | ||
16 | #define TDES_MR_KEYMOD_3KEY (0 << 4) | ||
17 | #define TDES_MR_KEYMOD_2KEY (1 << 4) | ||
18 | #define TDES_MR_SMOD_MASK (0x3 << 8) | ||
19 | #define TDES_MR_SMOD_MANUAL (0x0 << 8) | ||
20 | #define TDES_MR_SMOD_AUTO (0x1 << 8) | ||
21 | #define TDES_MR_SMOD_PDC (0x2 << 8) | ||
22 | #define TDES_MR_OPMOD_MASK (0x3 << 12) | ||
23 | #define TDES_MR_OPMOD_ECB (0x0 << 12) | ||
24 | #define TDES_MR_OPMOD_CBC (0x1 << 12) | ||
25 | #define TDES_MR_OPMOD_OFB (0x2 << 12) | ||
26 | #define TDES_MR_OPMOD_CFB (0x3 << 12) | ||
27 | #define TDES_MR_LOD (0x1 << 15) | ||
28 | #define TDES_MR_CFBS_MASK (0x3 << 16) | ||
29 | #define TDES_MR_CFBS_64b (0x0 << 16) | ||
30 | #define TDES_MR_CFBS_32b (0x1 << 16) | ||
31 | #define TDES_MR_CFBS_16b (0x2 << 16) | ||
32 | #define TDES_MR_CFBS_8b (0x3 << 16) | ||
33 | #define TDES_MR_CKEY_MASK (0xF << 20) | ||
34 | #define TDES_MR_CKEY_OFFSET 20 | ||
35 | #define TDES_MR_CTYPE_MASK (0x3F << 24) | ||
36 | #define TDES_MR_CTYPE_OFFSET 24 | ||
37 | |||
38 | #define TDES_IER 0x10 | ||
39 | #define TDES_IDR 0x14 | ||
40 | #define TDES_IMR 0x18 | ||
41 | #define TDES_ISR 0x1C | ||
42 | #define TDES_INT_DATARDY (1 << 0) | ||
43 | #define TDES_INT_ENDRX (1 << 1) | ||
44 | #define TDES_INT_ENDTX (1 << 2) | ||
45 | #define TDES_INT_RXBUFF (1 << 3) | ||
46 | #define TDES_INT_TXBUFE (1 << 4) | ||
47 | #define TDES_INT_URAD (1 << 8) | ||
48 | #define TDES_ISR_URAT_MASK (0x3 << 12) | ||
49 | #define TDES_ISR_URAT_IDR (0x0 << 12) | ||
50 | #define TDES_ISR_URAT_ODR (0x1 << 12) | ||
51 | #define TDES_ISR_URAT_MR (0x2 << 12) | ||
52 | #define TDES_ISR_URAT_WO (0x3 << 12) | ||
53 | |||
54 | |||
55 | #define TDES_KEY1W1R 0x20 | ||
56 | #define TDES_KEY1W2R 0x24 | ||
57 | #define TDES_KEY2W1R 0x28 | ||
58 | #define TDES_KEY2W2R 0x2C | ||
59 | #define TDES_KEY3W1R 0x30 | ||
60 | #define TDES_KEY3W2R 0x34 | ||
61 | #define TDES_IDATA1R 0x40 | ||
62 | #define TDES_IDATA2R 0x44 | ||
63 | #define TDES_ODATA1R 0x50 | ||
64 | #define TDES_ODATA2R 0x54 | ||
65 | #define TDES_IV1R 0x60 | ||
66 | #define TDES_IV2R 0x64 | ||
67 | |||
68 | #define TDES_XTEARNDR 0x70 | ||
69 | #define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0) | ||
70 | #define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0 | ||
71 | |||
72 | #define TDES_RPR 0x100 | ||
73 | #define TDES_RCR 0x104 | ||
74 | #define TDES_TPR 0x108 | ||
75 | #define TDES_TCR 0x10C | ||
76 | #define TDES_RNPR 0x118 | ||
77 | #define TDES_RNCR 0x11C | ||
78 | #define TDES_TNPR 0x118 | ||
79 | #define TDES_TNCR 0x11C | ||
80 | #define TDES_PTCR 0x120 | ||
81 | #define TDES_PTCR_RXTEN (1 << 0) | ||
82 | #define TDES_PTCR_RXTDIS (1 << 1) | ||
83 | #define TDES_PTCR_TXTEN (1 << 8) | ||
84 | #define TDES_PTCR_TXTDIS (1 << 9) | ||
85 | #define TDES_PTSR 0x124 | ||
86 | #define TDES_PTSR_RXTEN (1 << 0) | ||
87 | #define TDES_PTSR_TXTEN (1 << 8) | ||
88 | |||
89 | #endif /* __ATMEL_TDES_REGS_H__ */ | ||
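The MR bitfields above compose by OR-ing one value per field. A standalone sketch (illustrative only, not driver code) building the mode word the way atmel_tdes_write_ctrl() below does, here for a PDC-driven, 3-key TDES, CBC encrypt operation:

#include <stdio.h>
#include "atmel-tdes-regs.h"

int main(void)
{
	unsigned int valmr = TDES_MR_SMOD_PDC |		/* PDC moves the data */
			     TDES_MR_KEYMOD_3KEY |	/* 24-byte key */
			     TDES_MR_TDESMOD_TDES |
			     TDES_MR_OPMOD_CBC |
			     TDES_MR_CYPHER_ENC;

	printf("TDES_MR = 0x%08x\n", valmr);	/* prints 0x00001203 */
	return 0;
}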
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c new file mode 100644 index 000000000000..eb2b61e57e2d --- /dev/null +++ b/drivers/crypto/atmel-tdes.c | |||
@@ -0,0 +1,1215 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for ATMEL DES/TDES HW acceleration. | ||
5 | * | ||
6 | * Copyright (c) 2012 Eukréa Electromatique - ATMEL | ||
7 | * Author: Nicolas Royer <nicolas@eukrea.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * Some ideas are from omap-aes.c drivers. | ||
14 | */ | ||
15 | |||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/hw_random.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | |||
26 | #include <linux/device.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/interrupt.h> | ||
33 | #include <linux/irq.h> | ||
36 | #include <linux/scatterlist.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/crypto.h> | ||
40 | #include <linux/cryptohash.h> | ||
41 | #include <crypto/scatterwalk.h> | ||
42 | #include <crypto/algapi.h> | ||
43 | #include <crypto/des.h> | ||
44 | #include <crypto/hash.h> | ||
45 | #include <crypto/internal/hash.h> | ||
46 | #include "atmel-tdes-regs.h" | ||
47 | |||
48 | /* TDES flags */ | ||
49 | #define TDES_FLAGS_MODE_MASK 0x007f | ||
50 | #define TDES_FLAGS_ENCRYPT BIT(0) | ||
51 | #define TDES_FLAGS_CBC BIT(1) | ||
52 | #define TDES_FLAGS_CFB BIT(2) | ||
53 | #define TDES_FLAGS_CFB8 BIT(3) | ||
54 | #define TDES_FLAGS_CFB16 BIT(4) | ||
55 | #define TDES_FLAGS_CFB32 BIT(5) | ||
56 | #define TDES_FLAGS_OFB BIT(6) | ||
57 | |||
58 | #define TDES_FLAGS_INIT BIT(16) | ||
59 | #define TDES_FLAGS_FAST BIT(17) | ||
60 | #define TDES_FLAGS_BUSY BIT(18) | ||
61 | |||
62 | #define ATMEL_TDES_QUEUE_LENGTH 1 | ||
63 | |||
64 | #define CFB8_BLOCK_SIZE 1 | ||
65 | #define CFB16_BLOCK_SIZE 2 | ||
66 | #define CFB32_BLOCK_SIZE 4 | ||
67 | #define CFB64_BLOCK_SIZE 8 | ||
68 | |||
69 | |||
70 | struct atmel_tdes_dev; | ||
71 | |||
72 | struct atmel_tdes_ctx { | ||
73 | struct atmel_tdes_dev *dd; | ||
74 | |||
75 | int keylen; | ||
76 | u32 key[3*DES_KEY_SIZE / sizeof(u32)]; | ||
77 | unsigned long flags; | ||
78 | }; | ||
79 | |||
80 | struct atmel_tdes_reqctx { | ||
81 | unsigned long mode; | ||
82 | }; | ||
83 | |||
84 | struct atmel_tdes_dev { | ||
85 | struct list_head list; | ||
86 | unsigned long phys_base; | ||
87 | void __iomem *io_base; | ||
88 | |||
89 | struct atmel_tdes_ctx *ctx; | ||
90 | struct device *dev; | ||
91 | struct clk *iclk; | ||
92 | int irq; | ||
93 | |||
94 | unsigned long flags; | ||
95 | int err; | ||
96 | |||
97 | spinlock_t lock; | ||
98 | struct crypto_queue queue; | ||
99 | |||
100 | struct tasklet_struct done_task; | ||
101 | struct tasklet_struct queue_task; | ||
102 | |||
103 | struct ablkcipher_request *req; | ||
104 | size_t total; | ||
105 | |||
106 | struct scatterlist *in_sg; | ||
107 | size_t in_offset; | ||
108 | struct scatterlist *out_sg; | ||
109 | size_t out_offset; | ||
110 | |||
111 | size_t buflen; | ||
112 | size_t dma_size; | ||
113 | |||
114 | void *buf_in; | ||
115 | int dma_in; | ||
116 | dma_addr_t dma_addr_in; | ||
117 | |||
118 | void *buf_out; | ||
119 | int dma_out; | ||
120 | dma_addr_t dma_addr_out; | ||
121 | }; | ||
122 | |||
123 | struct atmel_tdes_drv { | ||
124 | struct list_head dev_list; | ||
125 | spinlock_t lock; | ||
126 | }; | ||
127 | |||
128 | static struct atmel_tdes_drv atmel_tdes = { | ||
129 | .dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list), | ||
130 | .lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock), | ||
131 | }; | ||
132 | |||
133 | static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset, | ||
134 | void *buf, size_t buflen, size_t total, int out) | ||
135 | { | ||
136 | unsigned int count, off = 0; | ||
137 | |||
138 | while (buflen && total) { | ||
139 | count = min((*sg)->length - *offset, total); | ||
140 | count = min(count, buflen); | ||
141 | |||
142 | if (!count) | ||
143 | return off; | ||
144 | |||
145 | scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out); | ||
146 | |||
147 | off += count; | ||
148 | buflen -= count; | ||
149 | *offset += count; | ||
150 | total -= count; | ||
151 | |||
152 | if (*offset == (*sg)->length) { | ||
153 | *sg = sg_next(*sg); | ||
154 | if (*sg) | ||
155 | *offset = 0; | ||
156 | else | ||
157 | total = 0; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | return off; | ||
162 | } | ||
163 | |||
164 | static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset) | ||
165 | { | ||
166 | return readl_relaxed(dd->io_base + offset); | ||
167 | } | ||
168 | |||
169 | static inline void atmel_tdes_write(struct atmel_tdes_dev *dd, | ||
170 | u32 offset, u32 value) | ||
171 | { | ||
172 | writel_relaxed(value, dd->io_base + offset); | ||
173 | } | ||
174 | |||
175 | static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset, | ||
176 | u32 *value, int count) | ||
177 | { | ||
178 | for (; count--; value++, offset += 4) | ||
179 | atmel_tdes_write(dd, offset, *value); | ||
180 | } | ||
181 | |||
182 | static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx) | ||
183 | { | ||
184 | struct atmel_tdes_dev *tdes_dd = NULL; | ||
185 | struct atmel_tdes_dev *tmp; | ||
186 | |||
187 | spin_lock_bh(&atmel_tdes.lock); | ||
188 | if (!ctx->dd) { | ||
189 | list_for_each_entry(tmp, &atmel_tdes.dev_list, list) { | ||
190 | tdes_dd = tmp; | ||
191 | break; | ||
192 | } | ||
193 | ctx->dd = tdes_dd; | ||
194 | } else { | ||
195 | tdes_dd = ctx->dd; | ||
196 | } | ||
197 | spin_unlock_bh(&atmel_tdes.lock); | ||
198 | |||
199 | return tdes_dd; | ||
200 | } | ||
201 | |||
202 | static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd) | ||
203 | { | ||
204 | clk_prepare_enable(dd->iclk); | ||
205 | |||
206 | if (!(dd->flags & TDES_FLAGS_INIT)) { | ||
207 | atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST); | ||
208 | dd->flags |= TDES_FLAGS_INIT; | ||
209 | dd->err = 0; | ||
210 | } | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) | ||
216 | { | ||
217 | int err; | ||
218 | u32 valcr = 0, valmr = TDES_MR_SMOD_PDC; | ||
219 | |||
220 | err = atmel_tdes_hw_init(dd); | ||
221 | |||
222 | if (err) | ||
223 | return err; | ||
224 | |||
225 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); | ||
226 | |||
227 | /* MR register must be set before IV registers */ | ||
228 | if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) { | ||
229 | valmr |= TDES_MR_KEYMOD_3KEY; | ||
230 | valmr |= TDES_MR_TDESMOD_TDES; | ||
231 | } else if (dd->ctx->keylen > DES_KEY_SIZE) { | ||
232 | valmr |= TDES_MR_KEYMOD_2KEY; | ||
233 | valmr |= TDES_MR_TDESMOD_TDES; | ||
234 | } else { | ||
235 | valmr |= TDES_MR_TDESMOD_DES; | ||
236 | } | ||
237 | |||
238 | if (dd->flags & TDES_FLAGS_CBC) { | ||
239 | valmr |= TDES_MR_OPMOD_CBC; | ||
240 | } else if (dd->flags & TDES_FLAGS_CFB) { | ||
241 | valmr |= TDES_MR_OPMOD_CFB; | ||
242 | |||
243 | if (dd->flags & TDES_FLAGS_CFB8) | ||
244 | valmr |= TDES_MR_CFBS_8b; | ||
245 | else if (dd->flags & TDES_FLAGS_CFB16) | ||
246 | valmr |= TDES_MR_CFBS_16b; | ||
247 | else if (dd->flags & TDES_FLAGS_CFB32) | ||
248 | valmr |= TDES_MR_CFBS_32b; | ||
249 | } else if (dd->flags & TDES_FLAGS_OFB) { | ||
250 | valmr |= TDES_MR_OPMOD_OFB; | ||
251 | } | ||
252 | |||
253 | if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB)) | ||
254 | valmr |= TDES_MR_CYPHER_ENC; | ||
255 | |||
256 | atmel_tdes_write(dd, TDES_CR, valcr); | ||
257 | atmel_tdes_write(dd, TDES_MR, valmr); | ||
258 | |||
259 | atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key, | ||
260 | dd->ctx->keylen >> 2); | ||
261 | |||
262 | if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) || | ||
263 | (dd->flags & TDES_FLAGS_OFB)) && dd->req->info) { | ||
264 | atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2); | ||
265 | } | ||
266 | |||
267 | return 0; | ||
268 | } | ||
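The key-length dispatch above maps directly onto the three supported cipher variants. A standalone sketch (illustrative, not driver code) of the same dispatch:

#include <stdio.h>

#define DES_KEY_SIZE 8	/* as in <crypto/des.h> */

static const char *tdes_mode(unsigned int keylen)
{
	if (keylen > (DES_KEY_SIZE << 1))
		return "3-key TDES";
	if (keylen > DES_KEY_SIZE)
		return "2-key TDES";
	return "single DES";
}

int main(void)
{
	unsigned int keylen;

	for (keylen = 8; keylen <= 24; keylen += 8)
		printf("keylen %2u -> %s\n", keylen, tdes_mode(keylen));
	return 0;
}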
269 | |||
270 | static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd) | ||
271 | { | ||
272 | int err = 0; | ||
273 | size_t count; | ||
274 | |||
275 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); | ||
276 | |||
277 | if (dd->flags & TDES_FLAGS_FAST) { | ||
278 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
279 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
280 | } else { | ||
281 | dma_sync_single_for_device(dd->dev, dd->dma_addr_out, | ||
282 | dd->dma_size, DMA_FROM_DEVICE); | ||
283 | |||
284 | /* copy data */ | ||
285 | count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset, | ||
286 | dd->buf_out, dd->buflen, dd->dma_size, 1); | ||
287 | if (count != dd->dma_size) { | ||
288 | err = -EINVAL; | ||
289 | pr_err("not all data converted: %u\n", count); | ||
290 | } | ||
291 | } | ||
292 | |||
293 | return err; | ||
294 | } | ||
295 | |||
296 | static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd) | ||
297 | { | ||
298 | int err = -ENOMEM; | ||
299 | |||
300 | dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0); | ||
301 | dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0); | ||
302 | dd->buflen = PAGE_SIZE; | ||
303 | dd->buflen &= ~(DES_BLOCK_SIZE - 1); | ||
304 | |||
305 | if (!dd->buf_in || !dd->buf_out) { | ||
306 | dev_err(dd->dev, "unable to alloc pages.\n"); | ||
307 | goto err_alloc; | ||
308 | } | ||
309 | |||
310 | /* MAP here */ | ||
311 | dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, | ||
312 | dd->buflen, DMA_TO_DEVICE); | ||
313 | if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { | ||
314 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
315 | err = -EINVAL; | ||
316 | goto err_map_in; | ||
317 | } | ||
318 | |||
319 | dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, | ||
320 | dd->buflen, DMA_FROM_DEVICE); | ||
321 | if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { | ||
322 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
323 | err = -EINVAL; | ||
324 | goto err_map_out; | ||
325 | } | ||
326 | |||
327 | return 0; | ||
328 | |||
329 | err_map_out: | ||
330 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, | ||
331 | DMA_TO_DEVICE); | ||
332 | err_map_in: | ||
333 | free_page((unsigned long)dd->buf_out); | ||
334 | free_page((unsigned long)dd->buf_in); | ||
335 | err_alloc: | ||
336 | if (err) | ||
337 | pr_err("error: %d\n", err); | ||
338 | return err; | ||
339 | } | ||
340 | |||
341 | static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) | ||
342 | { | ||
343 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | ||
344 | DMA_FROM_DEVICE); | ||
345 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, | ||
346 | DMA_TO_DEVICE); | ||
347 | free_page((unsigned long)dd->buf_out); | ||
348 | free_page((unsigned long)dd->buf_in); | ||
349 | } | ||
350 | |||
351 | static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, | ||
352 | dma_addr_t dma_addr_out, int length) | ||
353 | { | ||
354 | struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
355 | struct atmel_tdes_dev *dd = ctx->dd; | ||
356 | int len32; | ||
357 | |||
358 | dd->dma_size = length; | ||
359 | |||
360 | if (!(dd->flags & TDES_FLAGS_FAST)) { | ||
361 | dma_sync_single_for_device(dd->dev, dma_addr_in, length, | ||
362 | DMA_TO_DEVICE); | ||
363 | } | ||
364 | |||
365 | if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8)) | ||
366 | len32 = DIV_ROUND_UP(length, sizeof(u8)); | ||
367 | else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16)) | ||
368 | len32 = DIV_ROUND_UP(length, sizeof(u16)); | ||
369 | else | ||
370 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
371 | |||
372 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); | ||
373 | atmel_tdes_write(dd, TDES_TPR, dma_addr_in); | ||
374 | atmel_tdes_write(dd, TDES_TCR, len32); | ||
375 | atmel_tdes_write(dd, TDES_RPR, dma_addr_out); | ||
376 | atmel_tdes_write(dd, TDES_RCR, len32); | ||
377 | |||
378 | /* Enable Interrupt */ | ||
379 | atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX); | ||
380 | |||
381 | /* Start DMA transfer */ | ||
382 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN); | ||
383 | |||
384 | return 0; | ||
385 | } | ||
386 | |||
387 | static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd) | ||
388 | { | ||
389 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( | ||
390 | crypto_ablkcipher_reqtfm(dd->req)); | ||
391 | int err, fast = 0, in, out; | ||
392 | size_t count; | ||
393 | dma_addr_t addr_in, addr_out; | ||
394 | |||
395 | if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { | ||
396 | /* check for alignment */ | ||
397 | in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); | ||
398 | out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)); | ||
399 | |||
400 | fast = in && out; | ||
401 | } | ||
402 | |||
403 | if (fast) { | ||
404 | count = min(dd->total, sg_dma_len(dd->in_sg)); | ||
405 | count = min(count, sg_dma_len(dd->out_sg)); | ||
406 | |||
407 | if (count != dd->total) { | ||
408 | pr_err("request length != buffer length\n"); | ||
409 | return -EINVAL; | ||
410 | } | ||
411 | |||
412 | err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
413 | if (!err) { | ||
414 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
415 | return -EINVAL; | ||
416 | } | ||
417 | |||
418 | err = dma_map_sg(dd->dev, dd->out_sg, 1, | ||
419 | DMA_FROM_DEVICE); | ||
420 | if (!err) { | ||
421 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
422 | dma_unmap_sg(dd->dev, dd->in_sg, 1, | ||
423 | DMA_TO_DEVICE); | ||
424 | return -EINVAL; | ||
425 | } | ||
426 | |||
427 | addr_in = sg_dma_address(dd->in_sg); | ||
428 | addr_out = sg_dma_address(dd->out_sg); | ||
429 | |||
430 | dd->flags |= TDES_FLAGS_FAST; | ||
431 | |||
432 | } else { | ||
433 | /* use cache buffers */ | ||
434 | count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset, | ||
435 | dd->buf_in, dd->buflen, dd->total, 0); | ||
436 | |||
437 | addr_in = dd->dma_addr_in; | ||
438 | addr_out = dd->dma_addr_out; | ||
439 | |||
440 | dd->flags &= ~TDES_FLAGS_FAST; | ||
441 | |||
442 | } | ||
443 | |||
444 | dd->total -= count; | ||
445 | |||
446 | err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count); | ||
447 | if (err) { | ||
448 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
449 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
450 | } | ||
451 | |||
452 | return err; | ||
453 | } | ||
454 | |||
455 | |||
456 | static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err) | ||
457 | { | ||
458 | struct ablkcipher_request *req = dd->req; | ||
459 | |||
460 | clk_disable_unprepare(dd->iclk); | ||
461 | |||
462 | dd->flags &= ~TDES_FLAGS_BUSY; | ||
463 | |||
464 | req->base.complete(&req->base, err); | ||
465 | } | ||
466 | |||
467 | static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd, | ||
468 | struct ablkcipher_request *req) | ||
469 | { | ||
470 | struct crypto_async_request *async_req, *backlog; | ||
471 | struct atmel_tdes_ctx *ctx; | ||
472 | struct atmel_tdes_reqctx *rctx; | ||
473 | unsigned long flags; | ||
474 | int err, ret = 0; | ||
475 | |||
476 | spin_lock_irqsave(&dd->lock, flags); | ||
477 | if (req) | ||
478 | ret = ablkcipher_enqueue_request(&dd->queue, req); | ||
479 | if (dd->flags & TDES_FLAGS_BUSY) { | ||
480 | spin_unlock_irqrestore(&dd->lock, flags); | ||
481 | return ret; | ||
482 | } | ||
483 | backlog = crypto_get_backlog(&dd->queue); | ||
484 | async_req = crypto_dequeue_request(&dd->queue); | ||
485 | if (async_req) | ||
486 | dd->flags |= TDES_FLAGS_BUSY; | ||
487 | spin_unlock_irqrestore(&dd->lock, flags); | ||
488 | |||
489 | if (!async_req) | ||
490 | return ret; | ||
491 | |||
492 | if (backlog) | ||
493 | backlog->complete(backlog, -EINPROGRESS); | ||
494 | |||
495 | req = ablkcipher_request_cast(async_req); | ||
496 | |||
497 | /* assign new request to device */ | ||
498 | dd->req = req; | ||
499 | dd->total = req->nbytes; | ||
500 | dd->in_offset = 0; | ||
501 | dd->in_sg = req->src; | ||
502 | dd->out_offset = 0; | ||
503 | dd->out_sg = req->dst; | ||
504 | |||
505 | rctx = ablkcipher_request_ctx(req); | ||
506 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
507 | rctx->mode &= TDES_FLAGS_MODE_MASK; | ||
508 | dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode; | ||
509 | dd->ctx = ctx; | ||
510 | ctx->dd = dd; | ||
511 | |||
512 | err = atmel_tdes_write_ctrl(dd); | ||
513 | if (!err) | ||
514 | err = atmel_tdes_crypt_dma_start(dd); | ||
515 | if (err) { | ||
516 | /* done_task will not finish it, so do it here */ | ||
517 | atmel_tdes_finish_req(dd, err); | ||
518 | tasklet_schedule(&dd->queue_task); | ||
519 | } | ||
520 | |||
521 | return ret; | ||
522 | } | ||
523 | |||
524 | |||
525 | static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
526 | { | ||
527 | struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx( | ||
528 | crypto_ablkcipher_reqtfm(req)); | ||
529 | struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
530 | struct atmel_tdes_dev *dd; | ||
531 | |||
532 | if (mode & TDES_FLAGS_CFB8) { | ||
533 | if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { | ||
534 | pr_err("request size is not exact amount of CFB8 blocks\n"); | ||
535 | return -EINVAL; | ||
536 | } | ||
537 | } else if (mode & TDES_FLAGS_CFB16) { | ||
538 | if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { | ||
539 | pr_err("request size is not exact amount of CFB16 blocks\n"); | ||
540 | return -EINVAL; | ||
541 | } | ||
542 | } else if (mode & TDES_FLAGS_CFB32) { | ||
543 | if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { | ||
544 | pr_err("request size is not exact amount of CFB32 blocks\n"); | ||
545 | return -EINVAL; | ||
546 | } | ||
547 | } else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) { | ||
548 | pr_err("request size is not exact amount of DES blocks\n"); | ||
549 | return -EINVAL; | ||
550 | } | ||
551 | |||
552 | dd = atmel_tdes_find_dev(ctx); | ||
553 | if (!dd) | ||
554 | return -ENODEV; | ||
555 | |||
556 | rctx->mode = mode; | ||
557 | |||
558 | return atmel_tdes_handle_queue(dd, req); | ||
559 | } | ||
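The alignment checks above mean callers must submit whole blocks: DES_BLOCK_SIZE (8 bytes) for ECB/CBC/OFB/CFB64, and 1/2/4 bytes for CFB8/CFB16/CFB32. A hedged sketch of a kernel caller driving the "cbc(des)" implementation registered below through the ablkcipher API (function name illustrative, error handling trimmed):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <crypto/des.h>

/* Encrypt one 8-byte block in place via "cbc(des)". */
static int cbc_des_example(u8 *buf, u8 *iv, const u8 *key)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(des)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, DES_KEY_SIZE);
	if (err)
		goto out;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, DES_BLOCK_SIZE);
	ablkcipher_request_set_crypt(req, &sg, &sg, DES_BLOCK_SIZE, iv);

	/* Async driver: a real caller must wait on -EINPROGRESS/-EBUSY. */
	err = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out:
	crypto_free_ablkcipher(tfm);
	return err;
}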
560 | |||
561 | static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
562 | unsigned int keylen) | ||
563 | { | ||
564 | u32 tmp[DES_EXPKEY_WORDS]; | ||
565 | int err; | ||
566 | struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm); | ||
567 | |||
568 | struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
569 | |||
570 | if (keylen != DES_KEY_SIZE) { | ||
571 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
572 | return -EINVAL; | ||
573 | } | ||
574 | |||
575 | err = des_ekey(tmp, key); | ||
576 | if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | ||
577 | ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | |||
581 | memcpy(ctx->key, key, keylen); | ||
582 | ctx->keylen = keylen; | ||
583 | |||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
588 | unsigned int keylen) | ||
589 | { | ||
590 | struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
591 | const char *alg_name; | ||
592 | |||
593 | alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)); | ||
594 | |||
595 | /* | ||
596 | * HW bug in CFB 3-key mode: only 2-key operation is supported for CFB. | ||
597 | */ | ||
598 | if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) { | ||
599 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
600 | return -EINVAL; | ||
601 | } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) { | ||
602 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
603 | return -EINVAL; | ||
604 | } | ||
605 | |||
606 | memcpy(ctx->key, key, keylen); | ||
607 | ctx->keylen = keylen; | ||
608 | |||
609 | return 0; | ||
610 | } | ||
611 | |||
612 | static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req) | ||
613 | { | ||
614 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT); | ||
615 | } | ||
616 | |||
617 | static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req) | ||
618 | { | ||
619 | return atmel_tdes_crypt(req, 0); | ||
620 | } | ||
621 | |||
622 | static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req) | ||
623 | { | ||
624 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC); | ||
625 | } | ||
626 | |||
627 | static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req) | ||
628 | { | ||
629 | return atmel_tdes_crypt(req, TDES_FLAGS_CBC); | ||
630 | } | ||
631 | static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req) | ||
632 | { | ||
633 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB); | ||
634 | } | ||
635 | |||
636 | static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req) | ||
637 | { | ||
638 | return atmel_tdes_crypt(req, TDES_FLAGS_CFB); | ||
639 | } | ||
640 | |||
641 | static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req) | ||
642 | { | ||
643 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | | ||
644 | TDES_FLAGS_CFB8); | ||
645 | } | ||
646 | |||
647 | static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req) | ||
648 | { | ||
649 | return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8); | ||
650 | } | ||
651 | |||
652 | static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req) | ||
653 | { | ||
654 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | | ||
655 | TDES_FLAGS_CFB16); | ||
656 | } | ||
657 | |||
658 | static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req) | ||
659 | { | ||
660 | return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16); | ||
661 | } | ||
662 | |||
663 | static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req) | ||
664 | { | ||
665 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | | ||
666 | TDES_FLAGS_CFB32); | ||
667 | } | ||
668 | |||
669 | static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req) | ||
670 | { | ||
671 | return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32); | ||
672 | } | ||
673 | |||
674 | static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req) | ||
675 | { | ||
676 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB); | ||
677 | } | ||
678 | |||
679 | static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req) | ||
680 | { | ||
681 | return atmel_tdes_crypt(req, TDES_FLAGS_OFB); | ||
682 | } | ||
683 | |||
684 | static int atmel_tdes_cra_init(struct crypto_tfm *tfm) | ||
685 | { | ||
686 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx); | ||
687 | |||
688 | return 0; | ||
689 | } | ||
690 | |||
691 | static void atmel_tdes_cra_exit(struct crypto_tfm *tfm) | ||
692 | { | ||
693 | } | ||
694 | |||
695 | static struct crypto_alg tdes_algs[] = { | ||
696 | { | ||
697 | .cra_name = "ecb(des)", | ||
698 | .cra_driver_name = "atmel-ecb-des", | ||
699 | .cra_priority = 100, | ||
700 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
701 | .cra_blocksize = DES_BLOCK_SIZE, | ||
702 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
703 | .cra_alignmask = 0, | ||
704 | .cra_type = &crypto_ablkcipher_type, | ||
705 | .cra_module = THIS_MODULE, | ||
706 | .cra_init = atmel_tdes_cra_init, | ||
707 | .cra_exit = atmel_tdes_cra_exit, | ||
708 | .cra_u.ablkcipher = { | ||
709 | .min_keysize = DES_KEY_SIZE, | ||
710 | .max_keysize = DES_KEY_SIZE, | ||
711 | .setkey = atmel_des_setkey, | ||
712 | .encrypt = atmel_tdes_ecb_encrypt, | ||
713 | .decrypt = atmel_tdes_ecb_decrypt, | ||
714 | } | ||
715 | }, | ||
716 | { | ||
717 | .cra_name = "cbc(des)", | ||
718 | .cra_driver_name = "atmel-cbc-des", | ||
719 | .cra_priority = 100, | ||
720 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
721 | .cra_blocksize = DES_BLOCK_SIZE, | ||
722 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
723 | .cra_alignmask = 0, | ||
724 | .cra_type = &crypto_ablkcipher_type, | ||
725 | .cra_module = THIS_MODULE, | ||
726 | .cra_init = atmel_tdes_cra_init, | ||
727 | .cra_exit = atmel_tdes_cra_exit, | ||
728 | .cra_u.ablkcipher = { | ||
729 | .min_keysize = DES_KEY_SIZE, | ||
730 | .max_keysize = DES_KEY_SIZE, | ||
731 | .ivsize = DES_BLOCK_SIZE, | ||
732 | .setkey = atmel_des_setkey, | ||
733 | .encrypt = atmel_tdes_cbc_encrypt, | ||
734 | .decrypt = atmel_tdes_cbc_decrypt, | ||
735 | } | ||
736 | }, | ||
737 | { | ||
738 | .cra_name = "cfb(des)", | ||
739 | .cra_driver_name = "atmel-cfb-des", | ||
740 | .cra_priority = 100, | ||
741 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
742 | .cra_blocksize = DES_BLOCK_SIZE, | ||
743 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
744 | .cra_alignmask = 0, | ||
745 | .cra_type = &crypto_ablkcipher_type, | ||
746 | .cra_module = THIS_MODULE, | ||
747 | .cra_init = atmel_tdes_cra_init, | ||
748 | .cra_exit = atmel_tdes_cra_exit, | ||
749 | .cra_u.ablkcipher = { | ||
750 | .min_keysize = DES_KEY_SIZE, | ||
751 | .max_keysize = DES_KEY_SIZE, | ||
752 | .ivsize = DES_BLOCK_SIZE, | ||
753 | .setkey = atmel_des_setkey, | ||
754 | .encrypt = atmel_tdes_cfb_encrypt, | ||
755 | .decrypt = atmel_tdes_cfb_decrypt, | ||
756 | } | ||
757 | }, | ||
758 | { | ||
759 | .cra_name = "cfb8(des)", | ||
760 | .cra_driver_name = "atmel-cfb8-des", | ||
761 | .cra_priority = 100, | ||
762 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
763 | .cra_blocksize = CFB8_BLOCK_SIZE, | ||
764 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
765 | .cra_alignmask = 0, | ||
766 | .cra_type = &crypto_ablkcipher_type, | ||
767 | .cra_module = THIS_MODULE, | ||
768 | .cra_init = atmel_tdes_cra_init, | ||
769 | .cra_exit = atmel_tdes_cra_exit, | ||
770 | .cra_u.ablkcipher = { | ||
771 | .min_keysize = DES_KEY_SIZE, | ||
772 | .max_keysize = DES_KEY_SIZE, | ||
773 | .ivsize = DES_BLOCK_SIZE, | ||
774 | .setkey = atmel_des_setkey, | ||
775 | .encrypt = atmel_tdes_cfb8_encrypt, | ||
776 | .decrypt = atmel_tdes_cfb8_decrypt, | ||
777 | } | ||
778 | }, | ||
779 | { | ||
780 | .cra_name = "cfb16(des)", | ||
781 | .cra_driver_name = "atmel-cfb16-des", | ||
782 | .cra_priority = 100, | ||
783 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
784 | .cra_blocksize = CFB16_BLOCK_SIZE, | ||
785 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
786 | .cra_alignmask = 0, | ||
787 | .cra_type = &crypto_ablkcipher_type, | ||
788 | .cra_module = THIS_MODULE, | ||
789 | .cra_init = atmel_tdes_cra_init, | ||
790 | .cra_exit = atmel_tdes_cra_exit, | ||
791 | .cra_u.ablkcipher = { | ||
792 | .min_keysize = DES_KEY_SIZE, | ||
793 | .max_keysize = DES_KEY_SIZE, | ||
794 | .ivsize = DES_BLOCK_SIZE, | ||
795 | .setkey = atmel_des_setkey, | ||
796 | .encrypt = atmel_tdes_cfb16_encrypt, | ||
797 | .decrypt = atmel_tdes_cfb16_decrypt, | ||
798 | } | ||
799 | }, | ||
800 | { | ||
801 | .cra_name = "cfb32(des)", | ||
802 | .cra_driver_name = "atmel-cfb32-des", | ||
803 | .cra_priority = 100, | ||
804 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
805 | .cra_blocksize = CFB32_BLOCK_SIZE, | ||
806 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
807 | .cra_alignmask = 0, | ||
808 | .cra_type = &crypto_ablkcipher_type, | ||
809 | .cra_module = THIS_MODULE, | ||
810 | .cra_init = atmel_tdes_cra_init, | ||
811 | .cra_exit = atmel_tdes_cra_exit, | ||
812 | .cra_u.ablkcipher = { | ||
813 | .min_keysize = DES_KEY_SIZE, | ||
814 | .max_keysize = DES_KEY_SIZE, | ||
815 | .ivsize = DES_BLOCK_SIZE, | ||
816 | .setkey = atmel_des_setkey, | ||
817 | .encrypt = atmel_tdes_cfb32_encrypt, | ||
818 | .decrypt = atmel_tdes_cfb32_decrypt, | ||
819 | } | ||
820 | }, | ||
821 | { | ||
822 | .cra_name = "ofb(des)", | ||
823 | .cra_driver_name = "atmel-ofb-des", | ||
824 | .cra_priority = 100, | ||
825 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
826 | .cra_blocksize = DES_BLOCK_SIZE, | ||
827 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
828 | .cra_alignmask = 0, | ||
829 | .cra_type = &crypto_ablkcipher_type, | ||
830 | .cra_module = THIS_MODULE, | ||
831 | .cra_init = atmel_tdes_cra_init, | ||
832 | .cra_exit = atmel_tdes_cra_exit, | ||
833 | .cra_u.ablkcipher = { | ||
834 | .min_keysize = DES_KEY_SIZE, | ||
835 | .max_keysize = DES_KEY_SIZE, | ||
836 | .ivsize = DES_BLOCK_SIZE, | ||
837 | .setkey = atmel_des_setkey, | ||
838 | .encrypt = atmel_tdes_ofb_encrypt, | ||
839 | .decrypt = atmel_tdes_ofb_decrypt, | ||
840 | } | ||
841 | }, | ||
842 | { | ||
843 | .cra_name = "ecb(des3_ede)", | ||
844 | .cra_driver_name = "atmel-ecb-tdes", | ||
845 | .cra_priority = 100, | ||
846 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
847 | .cra_blocksize = DES_BLOCK_SIZE, | ||
848 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
849 | .cra_alignmask = 0, | ||
850 | .cra_type = &crypto_ablkcipher_type, | ||
851 | .cra_module = THIS_MODULE, | ||
852 | .cra_init = atmel_tdes_cra_init, | ||
853 | .cra_exit = atmel_tdes_cra_exit, | ||
854 | .cra_u.ablkcipher = { | ||
855 | .min_keysize = 2 * DES_KEY_SIZE, | ||
856 | .max_keysize = 3 * DES_KEY_SIZE, | ||
857 | .setkey = atmel_tdes_setkey, | ||
858 | .encrypt = atmel_tdes_ecb_encrypt, | ||
859 | .decrypt = atmel_tdes_ecb_decrypt, | ||
860 | } | ||
861 | }, | ||
862 | { | ||
863 | .cra_name = "cbc(des3_ede)", | ||
864 | .cra_driver_name = "atmel-cbc-tdes", | ||
865 | .cra_priority = 100, | ||
866 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
867 | .cra_blocksize = DES_BLOCK_SIZE, | ||
868 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
869 | .cra_alignmask = 0, | ||
870 | .cra_type = &crypto_ablkcipher_type, | ||
871 | .cra_module = THIS_MODULE, | ||
872 | .cra_init = atmel_tdes_cra_init, | ||
873 | .cra_exit = atmel_tdes_cra_exit, | ||
874 | .cra_u.ablkcipher = { | ||
875 | .min_keysize = 2*DES_KEY_SIZE, | ||
876 | .max_keysize = 3*DES_KEY_SIZE, | ||
877 | .ivsize = DES_BLOCK_SIZE, | ||
878 | .setkey = atmel_tdes_setkey, | ||
879 | .encrypt = atmel_tdes_cbc_encrypt, | ||
880 | .decrypt = atmel_tdes_cbc_decrypt, | ||
881 | } | ||
882 | }, | ||
883 | { | ||
884 | .cra_name = "cfb(des3_ede)", | ||
885 | .cra_driver_name = "atmel-cfb-tdes", | ||
886 | .cra_priority = 100, | ||
887 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
888 | .cra_blocksize = DES_BLOCK_SIZE, | ||
889 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
890 | .cra_alignmask = 0, | ||
891 | .cra_type = &crypto_ablkcipher_type, | ||
892 | .cra_module = THIS_MODULE, | ||
893 | .cra_init = atmel_tdes_cra_init, | ||
894 | .cra_exit = atmel_tdes_cra_exit, | ||
895 | .cra_u.ablkcipher = { | ||
896 | .min_keysize = 2*DES_KEY_SIZE, | ||
897 | .max_keysize = 2*DES_KEY_SIZE, | ||
898 | .ivsize = DES_BLOCK_SIZE, | ||
899 | .setkey = atmel_tdes_setkey, | ||
900 | .encrypt = atmel_tdes_cfb_encrypt, | ||
901 | .decrypt = atmel_tdes_cfb_decrypt, | ||
902 | } | ||
903 | }, | ||
904 | { | ||
905 | .cra_name = "cfb8(des3_ede)", | ||
906 | .cra_driver_name = "atmel-cfb8-tdes", | ||
907 | .cra_priority = 100, | ||
908 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
909 | .cra_blocksize = CFB8_BLOCK_SIZE, | ||
910 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
911 | .cra_alignmask = 0, | ||
912 | .cra_type = &crypto_ablkcipher_type, | ||
913 | .cra_module = THIS_MODULE, | ||
914 | .cra_init = atmel_tdes_cra_init, | ||
915 | .cra_exit = atmel_tdes_cra_exit, | ||
916 | .cra_u.ablkcipher = { | ||
917 | .min_keysize = 2*DES_KEY_SIZE, | ||
918 | .max_keysize = 2*DES_KEY_SIZE, | ||
919 | .ivsize = DES_BLOCK_SIZE, | ||
920 | .setkey = atmel_tdes_setkey, | ||
921 | .encrypt = atmel_tdes_cfb8_encrypt, | ||
922 | .decrypt = atmel_tdes_cfb8_decrypt, | ||
923 | } | ||
924 | }, | ||
925 | { | ||
926 | .cra_name = "cfb16(des3_ede)", | ||
927 | .cra_driver_name = "atmel-cfb16-tdes", | ||
928 | .cra_priority = 100, | ||
929 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
930 | .cra_blocksize = CFB16_BLOCK_SIZE, | ||
931 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
932 | .cra_alignmask = 0, | ||
933 | .cra_type = &crypto_ablkcipher_type, | ||
934 | .cra_module = THIS_MODULE, | ||
935 | .cra_init = atmel_tdes_cra_init, | ||
936 | .cra_exit = atmel_tdes_cra_exit, | ||
937 | .cra_u.ablkcipher = { | ||
938 | .min_keysize = 2*DES_KEY_SIZE, | ||
939 | .max_keysize = 2*DES_KEY_SIZE, | ||
940 | .ivsize = DES_BLOCK_SIZE, | ||
941 | .setkey = atmel_tdes_setkey, | ||
942 | .encrypt = atmel_tdes_cfb16_encrypt, | ||
943 | .decrypt = atmel_tdes_cfb16_decrypt, | ||
944 | } | ||
945 | }, | ||
946 | { | ||
947 | .cra_name = "cfb32(des3_ede)", | ||
948 | .cra_driver_name = "atmel-cfb32-tdes", | ||
949 | .cra_priority = 100, | ||
950 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
951 | .cra_blocksize = CFB32_BLOCK_SIZE, | ||
952 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
953 | .cra_alignmask = 0, | ||
954 | .cra_type = &crypto_ablkcipher_type, | ||
955 | .cra_module = THIS_MODULE, | ||
956 | .cra_init = atmel_tdes_cra_init, | ||
957 | .cra_exit = atmel_tdes_cra_exit, | ||
958 | .cra_u.ablkcipher = { | ||
959 | .min_keysize = 2*DES_KEY_SIZE, | ||
960 | .max_keysize = 2*DES_KEY_SIZE, | ||
961 | .ivsize = DES_BLOCK_SIZE, | ||
962 | .setkey = atmel_tdes_setkey, | ||
963 | .encrypt = atmel_tdes_cfb32_encrypt, | ||
964 | .decrypt = atmel_tdes_cfb32_decrypt, | ||
965 | } | ||
966 | }, | ||
967 | { | ||
968 | .cra_name = "ofb(des3_ede)", | ||
969 | .cra_driver_name = "atmel-ofb-tdes", | ||
970 | .cra_priority = 100, | ||
971 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
972 | .cra_blocksize = DES_BLOCK_SIZE, | ||
973 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
974 | .cra_alignmask = 0, | ||
975 | .cra_type = &crypto_ablkcipher_type, | ||
976 | .cra_module = THIS_MODULE, | ||
977 | .cra_init = atmel_tdes_cra_init, | ||
978 | .cra_exit = atmel_tdes_cra_exit, | ||
979 | .cra_u.ablkcipher = { | ||
980 | .min_keysize = 2*DES_KEY_SIZE, | ||
981 | .max_keysize = 3*DES_KEY_SIZE, | ||
982 | .ivsize = DES_BLOCK_SIZE, | ||
983 | .setkey = atmel_tdes_setkey, | ||
984 | .encrypt = atmel_tdes_ofb_encrypt, | ||
985 | .decrypt = atmel_tdes_ofb_decrypt, | ||
986 | } | ||
987 | }, | ||
988 | }; | ||
989 | |||
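Once this table is registered, the modes are reachable through the generic crypto API under their cra_name strings. A hedged sketch of a kernel caller picking up the CBC(3DES) entry via the era's ablkcipher interface (request setup and async completion handling elided):

	static int example_tdes_cbc(const u8 *key)	/* 3 * DES_KEY_SIZE bytes */
	{
		/* resolves to "atmel-cbc-tdes" when this driver wins on priority */
		struct crypto_ablkcipher *tfm =
				crypto_alloc_ablkcipher("cbc(des3_ede)", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_ablkcipher_setkey(tfm, key, 3 * DES_KEY_SIZE);
		/* ... allocate an ablkcipher_request, set IV/src/dst scatterlists,
		 * call crypto_ablkcipher_encrypt() and wait for completion ... */
		crypto_free_ablkcipher(tfm);
		return err;
	}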
990 | static void atmel_tdes_queue_task(unsigned long data) | ||
991 | { | ||
992 | struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data; | ||
993 | |||
994 | atmel_tdes_handle_queue(dd, NULL); | ||
995 | } | ||
996 | |||
997 | static void atmel_tdes_done_task(unsigned long data) | ||
998 | { | ||
999 | struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data; | ||
1000 | int err; | ||
1001 | |||
1002 | err = atmel_tdes_crypt_dma_stop(dd); | ||
1003 | |||
1004 | err = dd->err ? : err; | ||
1005 | |||
1006 | if (dd->total && !err) { | ||
1007 | err = atmel_tdes_crypt_dma_start(dd); | ||
1008 | if (!err) | ||
1009 | return; | ||
1010 | } | ||
1011 | |||
1012 | atmel_tdes_finish_req(dd, err); | ||
1013 | atmel_tdes_handle_queue(dd, NULL); | ||
1014 | } | ||
1015 | |||
1016 | static irqreturn_t atmel_tdes_irq(int irq, void *dev_id) | ||
1017 | { | ||
1018 | struct atmel_tdes_dev *tdes_dd = dev_id; | ||
1019 | u32 reg; | ||
1020 | |||
1021 | reg = atmel_tdes_read(tdes_dd, TDES_ISR); | ||
1022 | if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) { | ||
1023 | atmel_tdes_write(tdes_dd, TDES_IDR, reg); | ||
1024 | if (TDES_FLAGS_BUSY & tdes_dd->flags) | ||
1025 | tasklet_schedule(&tdes_dd->done_task); | ||
1026 | else | ||
1027 | dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n"); | ||
1028 | return IRQ_HANDLED; | ||
1029 | } | ||
1030 | |||
1031 | return IRQ_NONE; | ||
1032 | } | ||
1033 | |||
1034 | static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd) | ||
1035 | { | ||
1036 | int i; | ||
1037 | |||
1038 | for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) | ||
1039 | crypto_unregister_alg(&tdes_algs[i]); | ||
1040 | } | ||
1041 | |||
1042 | static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd) | ||
1043 | { | ||
1044 | int err, i, j; | ||
1045 | |||
1046 | for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) { | ||
1047 | INIT_LIST_HEAD(&tdes_algs[i].cra_list); | ||
1048 | err = crypto_register_alg(&tdes_algs[i]); | ||
1049 | if (err) | ||
1050 | goto err_tdes_algs; | ||
1051 | } | ||
1052 | |||
1053 | return 0; | ||
1054 | |||
1055 | err_tdes_algs: | ||
1056 | for (j = 0; j < i; j++) | ||
1057 | crypto_unregister_alg(&tdes_algs[j]); | ||
1058 | |||
1059 | return err; | ||
1060 | } | ||
1061 | |||
1062 | static int __devinit atmel_tdes_probe(struct platform_device *pdev) | ||
1063 | { | ||
1064 | struct atmel_tdes_dev *tdes_dd; | ||
1065 | struct device *dev = &pdev->dev; | ||
1066 | struct resource *tdes_res; | ||
1067 | unsigned long tdes_phys_size; | ||
1068 | int err; | ||
1069 | |||
1070 | tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL); | ||
1071 | if (tdes_dd == NULL) { | ||
1072 | dev_err(dev, "unable to alloc data struct.\n"); | ||
1073 | err = -ENOMEM; | ||
1074 | goto tdes_dd_err; | ||
1075 | } | ||
1076 | |||
1077 | tdes_dd->dev = dev; | ||
1078 | |||
1079 | platform_set_drvdata(pdev, tdes_dd); | ||
1080 | |||
1081 | INIT_LIST_HEAD(&tdes_dd->list); | ||
1082 | |||
1083 | tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task, | ||
1084 | (unsigned long)tdes_dd); | ||
1085 | tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task, | ||
1086 | (unsigned long)tdes_dd); | ||
1087 | |||
1088 | crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH); | ||
1089 | |||
1090 | tdes_dd->irq = -1; | ||
1091 | |||
1092 | /* Get the base address */ | ||
1093 | tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1094 | if (!tdes_res) { | ||
1095 | dev_err(dev, "no MEM resource info\n"); | ||
1096 | err = -ENODEV; | ||
1097 | goto res_err; | ||
1098 | } | ||
1099 | tdes_dd->phys_base = tdes_res->start; | ||
1100 | tdes_phys_size = resource_size(tdes_res); | ||
1101 | |||
1102 | /* Get the IRQ */ | ||
1103 | tdes_dd->irq = platform_get_irq(pdev, 0); | ||
1104 | if (tdes_dd->irq < 0) { | ||
1105 | dev_err(dev, "no IRQ resource info\n"); | ||
1106 | err = tdes_dd->irq; | ||
1107 | goto res_err; | ||
1108 | } | ||
1109 | |||
1110 | err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED, | ||
1111 | "atmel-tdes", tdes_dd); | ||
1112 | if (err) { | ||
1113 | dev_err(dev, "unable to request tdes irq.\n"); | ||
1114 | goto tdes_irq_err; | ||
1115 | } | ||
1116 | |||
1117 | /* Initializing the clock */ | ||
1118 | tdes_dd->iclk = clk_get(&pdev->dev, NULL); | ||
1119 | if (IS_ERR(tdes_dd->iclk)) { | ||
1120 | dev_err(dev, "clock initialization failed.\n"); | ||
1121 | err = PTR_ERR(tdes_dd->iclk); | ||
1122 | goto clk_err; | ||
1123 | } | ||
1124 | |||
1125 | tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size); | ||
1126 | if (!tdes_dd->io_base) { | ||
1127 | dev_err(dev, "can't ioremap\n"); | ||
1128 | err = -ENOMEM; | ||
1129 | goto tdes_io_err; | ||
1130 | } | ||
1131 | |||
1132 | err = atmel_tdes_dma_init(tdes_dd); | ||
1133 | if (err) | ||
1134 | goto err_tdes_dma; | ||
1135 | |||
1136 | spin_lock(&atmel_tdes.lock); | ||
1137 | list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list); | ||
1138 | spin_unlock(&atmel_tdes.lock); | ||
1139 | |||
1140 | err = atmel_tdes_register_algs(tdes_dd); | ||
1141 | if (err) | ||
1142 | goto err_algs; | ||
1143 | |||
1144 | dev_info(dev, "Atmel DES/TDES\n"); | ||
1145 | |||
1146 | return 0; | ||
1147 | |||
1148 | err_algs: | ||
1149 | spin_lock(&atmel_tdes.lock); | ||
1150 | list_del(&tdes_dd->list); | ||
1151 | spin_unlock(&atmel_tdes.lock); | ||
1152 | atmel_tdes_dma_cleanup(tdes_dd); | ||
1153 | err_tdes_dma: | ||
1154 | iounmap(tdes_dd->io_base); | ||
1155 | tdes_io_err: | ||
1156 | clk_put(tdes_dd->iclk); | ||
1157 | clk_err: | ||
1158 | free_irq(tdes_dd->irq, tdes_dd); | ||
1159 | tdes_irq_err: | ||
1160 | res_err: | ||
1161 | tasklet_kill(&tdes_dd->done_task); | ||
1162 | tasklet_kill(&tdes_dd->queue_task); | ||
1163 | kfree(tdes_dd); | ||
1164 | tdes_dd = NULL; | ||
1165 | tdes_dd_err: | ||
1166 | dev_err(dev, "initialization failed.\n"); | ||
1167 | |||
1168 | return err; | ||
1169 | } | ||
1170 | |||
1171 | static int __devexit atmel_tdes_remove(struct platform_device *pdev) | ||
1172 | { | ||
1173 | struct atmel_tdes_dev *tdes_dd; | ||
1174 | |||
1175 | tdes_dd = platform_get_drvdata(pdev); | ||
1176 | if (!tdes_dd) | ||
1177 | return -ENODEV; | ||
1178 | spin_lock(&atmel_tdes.lock); | ||
1179 | list_del(&tdes_dd->list); | ||
1180 | spin_unlock(&atmel_tdes.lock); | ||
1181 | |||
1182 | atmel_tdes_unregister_algs(tdes_dd); | ||
1183 | |||
1184 | tasklet_kill(&tdes_dd->done_task); | ||
1185 | tasklet_kill(&tdes_dd->queue_task); | ||
1186 | |||
1187 | atmel_tdes_dma_cleanup(tdes_dd); | ||
1188 | |||
1189 | iounmap(tdes_dd->io_base); | ||
1190 | |||
1191 | clk_put(tdes_dd->iclk); | ||
1192 | |||
1193 | if (tdes_dd->irq >= 0) | ||
1194 | free_irq(tdes_dd->irq, tdes_dd); | ||
1195 | |||
1196 | kfree(tdes_dd); | ||
1197 | tdes_dd = NULL; | ||
1198 | |||
1199 | return 0; | ||
1200 | } | ||
1201 | |||
1202 | static struct platform_driver atmel_tdes_driver = { | ||
1203 | .probe = atmel_tdes_probe, | ||
1204 | .remove = __devexit_p(atmel_tdes_remove), | ||
1205 | .driver = { | ||
1206 | .name = "atmel_tdes", | ||
1207 | .owner = THIS_MODULE, | ||
1208 | }, | ||
1209 | }; | ||
1210 | |||
1211 | module_platform_driver(atmel_tdes_driver); | ||
1212 | |||
1213 | MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support."); | ||
1214 | MODULE_LICENSE("GPL v2"); | ||
1215 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); | ||
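module_platform_driver() above is pure boilerplate removal; the macro expands to the init/exit pair the driver would otherwise spell out by hand:

	/* equivalent open-coded form of module_platform_driver(atmel_tdes_driver) */
	static int __init atmel_tdes_driver_init(void)
	{
		return platform_driver_register(&atmel_tdes_driver);
	}
	module_init(atmel_tdes_driver_init);

	static void __exit atmel_tdes_driver_exit(void)
	{
		platform_driver_unregister(&atmel_tdes_driver);
	}
	module_exit(atmel_tdes_driver_exit);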
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c new file mode 100644 index 000000000000..5398580b4313 --- /dev/null +++ b/drivers/crypto/bfin_crc.c | |||
@@ -0,0 +1,780 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support Blackfin CRC HW acceleration. | ||
5 | * | ||
6 | * Copyright 2012 Analog Devices Inc. | ||
7 | * | ||
8 | * Licensed under the GPL-2. | ||
9 | */ | ||
10 | |||
11 | #include <linux/err.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/scatterlist.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/unaligned/access_ok.h> | ||
25 | #include <linux/crypto.h> | ||
26 | #include <linux/cryptohash.h> | ||
27 | #include <crypto/scatterwalk.h> | ||
28 | #include <crypto/algapi.h> | ||
29 | #include <crypto/hash.h> | ||
30 | #include <crypto/internal/hash.h> | ||
31 | |||
32 | #include <asm/blackfin.h> | ||
33 | #include <asm/bfin_crc.h> | ||
34 | #include <asm/dma.h> | ||
35 | #include <asm/portmux.h> | ||
36 | |||
37 | #define CRC_CCRYPTO_QUEUE_LENGTH 5 | ||
38 | |||
39 | #define DRIVER_NAME "bfin-hmac-crc" | ||
40 | #define CHKSUM_DIGEST_SIZE 4 | ||
41 | #define CHKSUM_BLOCK_SIZE 1 | ||
42 | |||
43 | #define CRC_MAX_DMA_DESC 100 | ||
44 | |||
45 | #define CRC_CRYPTO_STATE_UPDATE 1 | ||
46 | #define CRC_CRYPTO_STATE_FINALUPDATE 2 | ||
47 | #define CRC_CRYPTO_STATE_FINISH 3 | ||
48 | |||
49 | struct bfin_crypto_crc { | ||
50 | struct list_head list; | ||
51 | struct device *dev; | ||
52 | spinlock_t lock; | ||
53 | |||
54 | int irq; | ||
55 | int dma_ch; | ||
56 | u32 poly; | ||
57 | volatile struct crc_register *regs; | ||
58 | |||
59 | struct ahash_request *req; /* current request in operation */ | ||
60 | struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */ | ||
61 | dma_addr_t sg_dma; /* phy addr of sg dma descriptors */ | ||
62 | u8 *sg_mid_buf; | ||
63 | |||
64 | struct tasklet_struct done_task; | ||
65 | struct crypto_queue queue; /* waiting requests */ | ||
66 | |||
67 | u8 busy:1; /* crc device in operation flag */ | ||
68 | }; | ||
69 | |||
70 | static struct bfin_crypto_crc_list { | ||
71 | struct list_head dev_list; | ||
72 | spinlock_t lock; | ||
73 | } crc_list; | ||
74 | |||
75 | struct bfin_crypto_crc_reqctx { | ||
76 | struct bfin_crypto_crc *crc; | ||
77 | |||
78 | unsigned int total; /* total request bytes */ | ||
79 | size_t sg_buflen; /* bytes for this update */ | ||
80 | unsigned int sg_nents; | ||
81 | struct scatterlist *sg; /* sg list head for this update */ | ||
82 | struct scatterlist bufsl[2]; /* chained sg list */ | ||
83 | |||
84 | size_t bufnext_len; | ||
85 | size_t buflast_len; | ||
86 | u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */ | ||
87 | u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */ | ||
88 | |||
89 | u8 flag; | ||
90 | }; | ||
91 | |||
92 | struct bfin_crypto_crc_ctx { | ||
93 | struct bfin_crypto_crc *crc; | ||
94 | u32 key; | ||
95 | }; | ||
96 | |||
97 | |||
98 | /* | ||
99 | * derive number of elements in scatterlist | ||
100 | */ | ||
101 | static int sg_count(struct scatterlist *sg_list) | ||
102 | { | ||
103 | struct scatterlist *sg = sg_list; | ||
104 | int sg_nents = 1; | ||
105 | |||
106 | if (sg_list == NULL) | ||
107 | return 0; | ||
108 | |||
109 | while (!sg_is_last(sg)) { | ||
110 | sg_nents++; | ||
111 | sg = scatterwalk_sg_next(sg); | ||
112 | } | ||
113 | |||
114 | return sg_nents; | ||
115 | } | ||
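For reference, sg_count() relies on the end marker that sg_init_table() sets; a two-entry list is counted like this (buffer names are placeholders):

	struct scatterlist sl[2];

	sg_init_table(sl, 2);			/* marks sl[1] as the last entry */
	sg_set_buf(&sl[0], buf_a, len_a);
	sg_set_buf(&sl[1], buf_b, len_b);
	/* sg_count(sl) == 2, sg_count(NULL) == 0 */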
116 | |||
117 | /* | ||
118 | * get element in scatter list by given index | ||
119 | */ | ||
120 | static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents, | ||
121 | unsigned int index) | ||
122 | { | ||
123 | struct scatterlist *sg = NULL; | ||
124 | int i; | ||
125 | |||
126 | for_each_sg(sg_list, sg, nents, i) | ||
127 | if (i == index) | ||
128 | break; | ||
129 | |||
130 | return sg; | ||
131 | } | ||
132 | |||
133 | static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key) | ||
134 | { | ||
135 | crc->regs->datacntrld = 0; | ||
136 | crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET; | ||
137 | crc->regs->curresult = key; | ||
138 | |||
139 | /* setup CRC interrupts */ | ||
140 | crc->regs->status = CMPERRI | DCNTEXPI; | ||
141 | crc->regs->intrenset = CMPERRI | DCNTEXPI; | ||
142 | SSYNC(); | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static int bfin_crypto_crc_init(struct ahash_request *req) | ||
148 | { | ||
149 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
150 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); | ||
151 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | ||
152 | struct bfin_crypto_crc *crc; | ||
153 | |||
154 | spin_lock_bh(&crc_list.lock); | ||
155 | list_for_each_entry(crc, &crc_list.dev_list, list) { | ||
156 | crc_ctx->crc = crc; | ||
157 | break; | ||
158 | } | ||
159 | spin_unlock_bh(&crc_list.lock); | ||
160 | dev_dbg(crc->dev, "crc_init\n"); | ||
161 | |||
162 | if (sg_count(req->src) > CRC_MAX_DMA_DESC) { | ||
163 | dev_dbg(crc->dev, "init: requested sg list exceeds %d entries\n", | ||
164 | CRC_MAX_DMA_DESC); | ||
165 | return -EINVAL; | ||
166 | } | ||
167 | |||
168 | ctx->crc = crc; | ||
169 | ctx->bufnext_len = 0; | ||
170 | ctx->buflast_len = 0; | ||
171 | ctx->sg_buflen = 0; | ||
172 | ctx->total = 0; | ||
173 | ctx->flag = 0; | ||
174 | |||
175 | /* init crc results */ | ||
176 | put_unaligned_le32(crc_ctx->key, req->result); | ||
177 | |||
178 | dev_dbg(crc->dev, "init: digest size: %d\n", | ||
179 | crypto_ahash_digestsize(tfm)); | ||
180 | |||
181 | return bfin_crypto_crc_init_hw(crc, crc_ctx->key); | ||
182 | } | ||
183 | |||
184 | static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc) | ||
185 | { | ||
186 | struct scatterlist *sg; | ||
187 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req); | ||
188 | int i = 0, j = 0; | ||
189 | unsigned long dma_config; | ||
190 | unsigned int dma_count; | ||
191 | unsigned int dma_addr; | ||
192 | unsigned int mid_dma_count = 0; | ||
193 | int dma_mod; | ||
194 | |||
195 | dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE); | ||
196 | |||
197 | for_each_sg(ctx->sg, sg, ctx->sg_nents, j) { | ||
198 | dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32; | ||
199 | dma_addr = sg_dma_address(sg); | ||
200 | /* deduct extra bytes in last sg */ | ||
201 | if (sg_is_last(sg)) | ||
202 | dma_count = sg_dma_len(sg) - ctx->bufnext_len; | ||
203 | else | ||
204 | dma_count = sg_dma_len(sg); | ||
205 | |||
206 | if (mid_dma_count) { | ||
207 | /* Pad the previous mid dma buffer out to 4 bytes with the | ||
208 | first bytes of the current sg buffer, then advance the | ||
209 | current sg address and shrink its length accordingly. | ||
210 | */ | ||
211 | memcpy(crc->sg_mid_buf +((i-1) << 2) + mid_dma_count, | ||
212 | (void *)dma_addr, | ||
213 | CHKSUM_DIGEST_SIZE - mid_dma_count); | ||
214 | dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count; | ||
215 | dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count; | ||
216 | } | ||
217 | /* chop current sg dma len to multiple of 32 bits */ | ||
218 | mid_dma_count = dma_count % 4; | ||
219 | dma_count &= ~0x3; | ||
220 | |||
221 | if (dma_addr % 4 == 0) { | ||
222 | dma_config |= WDSIZE_32; | ||
223 | dma_count >>= 2; | ||
224 | dma_mod = 4; | ||
225 | } else if (dma_addr % 2 == 0) { | ||
226 | dma_config |= WDSIZE_16; | ||
227 | dma_count >>= 1; | ||
228 | dma_mod = 2; | ||
229 | } else { | ||
230 | dma_config |= WDSIZE_8; | ||
231 | dma_mod = 1; | ||
232 | } | ||
233 | |||
234 | crc->sg_cpu[i].start_addr = dma_addr; | ||
235 | crc->sg_cpu[i].cfg = dma_config; | ||
236 | crc->sg_cpu[i].x_count = dma_count; | ||
237 | crc->sg_cpu[i].x_modify = dma_mod; | ||
238 | dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " | ||
239 | "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", | ||
240 | i, crc->sg_cpu[i].start_addr, | ||
241 | crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, | ||
242 | crc->sg_cpu[i].x_modify); | ||
243 | i++; | ||
244 | |||
245 | if (mid_dma_count) { | ||
246 | /* copy extra bytes to next middle dma buffer */ | ||
247 | dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | | ||
248 | DMAEN | PSIZE_32 | WDSIZE_32; | ||
249 | memcpy(crc->sg_mid_buf + (i << 2), | ||
250 | (void *)(dma_addr + (dma_count << 2)), | ||
251 | mid_dma_count); | ||
252 | /* setup new dma descriptor for next middle dma */ | ||
253 | crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, | ||
254 | crc->sg_mid_buf + (i << 2), | ||
255 | CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE); | ||
256 | crc->sg_cpu[i].cfg = dma_config; | ||
257 | crc->sg_cpu[i].x_count = 1; | ||
258 | crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE; | ||
259 | dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " | ||
260 | "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", | ||
261 | i, crc->sg_cpu[i].start_addr, | ||
262 | crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, | ||
263 | crc->sg_cpu[i].x_modify); | ||
264 | i++; | ||
265 | } | ||
266 | } | ||
267 | |||
268 | dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32; | ||
269 | /* For final update req, append the buffer for next update as well */ | ||
270 | if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || | ||
271 | ctx->flag == CRC_CRYPTO_STATE_FINISH)) { | ||
272 | crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext, | ||
273 | CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE); | ||
274 | crc->sg_cpu[i].cfg = dma_config; | ||
275 | crc->sg_cpu[i].x_count = 1; | ||
276 | crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE; | ||
277 | dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " | ||
278 | "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", | ||
279 | i, crc->sg_cpu[i].start_addr, | ||
280 | crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, | ||
281 | crc->sg_cpu[i].x_modify); | ||
282 | i++; | ||
283 | } | ||
284 | |||
285 | if (i == 0) | ||
286 | return; | ||
287 | |||
288 | flush_dcache_range((unsigned int)crc->sg_cpu, | ||
289 | (unsigned int)crc->sg_cpu + | ||
290 | i * sizeof(struct dma_desc_array)); | ||
291 | |||
292 | /* Set the last descriptor to stop mode */ | ||
293 | crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE); | ||
294 | crc->sg_cpu[i - 1].cfg |= DI_EN; | ||
295 | set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma); | ||
296 | set_dma_x_count(crc->dma_ch, 0); | ||
297 | set_dma_x_modify(crc->dma_ch, 0); | ||
298 | SSYNC(); | ||
299 | set_dma_config(crc->dma_ch, dma_config); | ||
300 | } | ||
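A worked example of the 32-bit chopping above (editorial, not part of the patch): an 11-byte segment at a word-aligned address splits as follows.

	/*
	 * dma_count     = 11
	 * mid_dma_count = 11 % 4 = 3      -> 3 bytes spill into sg_mid_buf
	 * dma_count    &= ~0x3            -> 8 bytes go out by DMA
	 * WDSIZE_32 case: x_count = 8 >> 2 = 2 words, x_modify = 4
	 *
	 * The 3 spilled bytes are then completed to a full 32-bit word with
	 * the first byte(s) of the next segment and sent as an extra one-word
	 * descriptor (the mid_dma_count branch above).
	 */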
301 | |||
302 | static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc, | ||
303 | struct ahash_request *req) | ||
304 | { | ||
305 | struct crypto_async_request *async_req, *backlog; | ||
306 | struct bfin_crypto_crc_reqctx *ctx; | ||
307 | struct scatterlist *sg; | ||
308 | int ret = 0; | ||
309 | int nsg, i, j; | ||
310 | unsigned int nextlen; | ||
311 | unsigned long flags; | ||
312 | |||
313 | spin_lock_irqsave(&crc->lock, flags); | ||
314 | if (req) | ||
315 | ret = ahash_enqueue_request(&crc->queue, req); | ||
316 | if (crc->busy) { | ||
317 | spin_unlock_irqrestore(&crc->lock, flags); | ||
318 | return ret; | ||
319 | } | ||
320 | backlog = crypto_get_backlog(&crc->queue); | ||
321 | async_req = crypto_dequeue_request(&crc->queue); | ||
322 | if (async_req) | ||
323 | crc->busy = 1; | ||
324 | spin_unlock_irqrestore(&crc->lock, flags); | ||
325 | |||
326 | if (!async_req) | ||
327 | return ret; | ||
328 | |||
329 | if (backlog) | ||
330 | backlog->complete(backlog, -EINPROGRESS); | ||
331 | |||
332 | req = ahash_request_cast(async_req); | ||
333 | crc->req = req; | ||
334 | ctx = ahash_request_ctx(req); | ||
335 | ctx->sg = NULL; | ||
336 | ctx->sg_buflen = 0; | ||
337 | ctx->sg_nents = 0; | ||
338 | |||
339 | dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n", | ||
340 | ctx->flag, req->nbytes); | ||
341 | |||
342 | if (ctx->flag == CRC_CRYPTO_STATE_FINISH) { | ||
343 | if (ctx->bufnext_len == 0) { | ||
344 | crc->busy = 0; | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | /* Pad the last crc update buffer to 32 bits */ | ||
349 | memset(ctx->bufnext + ctx->bufnext_len, 0, | ||
350 | CHKSUM_DIGEST_SIZE - ctx->bufnext_len); | ||
351 | } else { | ||
352 | /* Stash data shorter than 32 bits in the buffer for the next update. */ | ||
353 | if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) { | ||
354 | memcpy(ctx->bufnext + ctx->bufnext_len, | ||
355 | sg_virt(req->src), req->nbytes); | ||
356 | ctx->bufnext_len += req->nbytes; | ||
357 | if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE && | ||
358 | ctx->bufnext_len) { | ||
359 | goto finish_update; | ||
360 | } else { | ||
361 | crc->busy = 0; | ||
362 | return 0; | ||
363 | } | ||
364 | } | ||
365 | |||
366 | if (ctx->bufnext_len) { | ||
367 | /* Chain in extra bytes of last update */ | ||
368 | ctx->buflast_len = ctx->bufnext_len; | ||
369 | memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len); | ||
370 | |||
371 | nsg = ctx->sg_buflen ? 2 : 1; | ||
372 | sg_init_table(ctx->bufsl, nsg); | ||
373 | sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len); | ||
374 | if (nsg > 1) | ||
375 | scatterwalk_sg_chain(ctx->bufsl, nsg, | ||
376 | req->src); | ||
377 | ctx->sg = ctx->bufsl; | ||
378 | } else | ||
379 | ctx->sg = req->src; | ||
380 | |||
381 | /* Chop crc buffer size to a multiple of 32 bits */ | ||
382 | nsg = ctx->sg_nents = sg_count(ctx->sg); | ||
383 | ctx->sg_buflen = ctx->buflast_len + req->nbytes; | ||
384 | ctx->bufnext_len = ctx->sg_buflen % 4; | ||
385 | ctx->sg_buflen &= ~0x3; | ||
386 | |||
387 | if (ctx->bufnext_len) { | ||
388 | /* copy extra bytes to buffer for next update */ | ||
389 | memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE); | ||
390 | nextlen = ctx->bufnext_len; | ||
391 | for (i = nsg - 1; i >= 0; i--) { | ||
392 | sg = sg_get(ctx->sg, nsg, i); | ||
393 | j = min(nextlen, sg_dma_len(sg)); | ||
394 | memcpy(ctx->bufnext + nextlen - j, | ||
395 | sg_virt(sg) + sg_dma_len(sg) - j, j); | ||
396 | if (j == sg_dma_len(sg)) | ||
397 | ctx->sg_nents--; | ||
398 | nextlen -= j; | ||
399 | if (nextlen == 0) | ||
400 | break; | ||
401 | } | ||
402 | } | ||
403 | } | ||
404 | |||
405 | finish_update: | ||
406 | if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || | ||
407 | ctx->flag == CRC_CRYPTO_STATE_FINISH)) | ||
408 | ctx->sg_buflen += CHKSUM_DIGEST_SIZE; | ||
409 | |||
410 | /* set CRC data count before starting DMA */ | ||
411 | crc->regs->datacnt = ctx->sg_buflen >> 2; | ||
412 | |||
413 | /* setup and enable CRC DMA */ | ||
414 | bfin_crypto_crc_config_dma(crc); | ||
415 | |||
416 | /* finally kick off CRC operation */ | ||
417 | crc->regs->control |= BLKEN; | ||
418 | SSYNC(); | ||
419 | |||
420 | return -EINPROGRESS; | ||
421 | } | ||
422 | |||
423 | static int bfin_crypto_crc_update(struct ahash_request *req) | ||
424 | { | ||
425 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | ||
426 | |||
427 | if (!req->nbytes) | ||
428 | return 0; | ||
429 | |||
430 | dev_dbg(ctx->crc->dev, "crc_update\n"); | ||
431 | ctx->total += req->nbytes; | ||
432 | ctx->flag = CRC_CRYPTO_STATE_UPDATE; | ||
433 | |||
434 | return bfin_crypto_crc_handle_queue(ctx->crc, req); | ||
435 | } | ||
436 | |||
437 | static int bfin_crypto_crc_final(struct ahash_request *req) | ||
438 | { | ||
439 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
440 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); | ||
441 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | ||
442 | |||
443 | dev_dbg(ctx->crc->dev, "crc_final\n"); | ||
444 | ctx->flag = CRC_CRYPTO_STATE_FINISH; | ||
445 | crc_ctx->key = 0; | ||
446 | |||
447 | return bfin_crypto_crc_handle_queue(ctx->crc, req); | ||
448 | } | ||
449 | |||
450 | static int bfin_crypto_crc_finup(struct ahash_request *req) | ||
451 | { | ||
452 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
453 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); | ||
454 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | ||
455 | |||
456 | dev_dbg(ctx->crc->dev, "crc_finishupdate\n"); | ||
457 | ctx->total += req->nbytes; | ||
458 | ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE; | ||
459 | crc_ctx->key = 0; | ||
460 | |||
461 | return bfin_crypto_crc_handle_queue(ctx->crc, req); | ||
462 | } | ||
463 | |||
464 | static int bfin_crypto_crc_digest(struct ahash_request *req) | ||
465 | { | ||
466 | int ret; | ||
467 | |||
468 | ret = bfin_crypto_crc_init(req); | ||
469 | if (ret) | ||
470 | return ret; | ||
471 | |||
472 | return bfin_crypto_crc_finup(req); | ||
473 | } | ||
474 | |||
475 | static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
476 | unsigned int keylen) | ||
477 | { | ||
478 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); | ||
479 | |||
480 | dev_dbg(crc_ctx->crc->dev, "crc_setkey\n"); | ||
481 | if (keylen != CHKSUM_DIGEST_SIZE) { | ||
482 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
483 | return -EINVAL; | ||
484 | } | ||
485 | |||
486 | crc_ctx->key = get_unaligned_le32(key); | ||
487 | |||
488 | return 0; | ||
489 | } | ||
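The "key" here is simply the 4-byte initial CRC value, read little-endian. Seeding with all-ones, a common CRC-32 convention, would look like this (tfm being any crypto_ahash handle bound to this algorithm):

	static const u8 seed[CHKSUM_DIGEST_SIZE] = { 0xff, 0xff, 0xff, 0xff };

	err = crypto_ahash_setkey(tfm, seed, sizeof(seed));
	/* crc_ctx->key is now 0xffffffff */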
490 | |||
491 | static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm) | ||
492 | { | ||
493 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm); | ||
494 | |||
495 | crc_ctx->key = 0; | ||
496 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
497 | sizeof(struct bfin_crypto_crc_reqctx)); | ||
498 | |||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm) | ||
503 | { | ||
504 | } | ||
505 | |||
506 | static struct ahash_alg algs = { | ||
507 | .init = bfin_crypto_crc_init, | ||
508 | .update = bfin_crypto_crc_update, | ||
509 | .final = bfin_crypto_crc_final, | ||
510 | .finup = bfin_crypto_crc_finup, | ||
511 | .digest = bfin_crypto_crc_digest, | ||
512 | .setkey = bfin_crypto_crc_setkey, | ||
513 | .halg.digestsize = CHKSUM_DIGEST_SIZE, | ||
514 | .halg.base = { | ||
515 | .cra_name = "hmac(crc32)", | ||
516 | .cra_driver_name = DRIVER_NAME, | ||
517 | .cra_priority = 100, | ||
518 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
519 | CRYPTO_ALG_ASYNC, | ||
520 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | ||
521 | .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), | ||
522 | .cra_alignmask = 3, | ||
523 | .cra_module = THIS_MODULE, | ||
524 | .cra_init = bfin_crypto_crc_cra_init, | ||
525 | .cra_exit = bfin_crypto_crc_cra_exit, | ||
526 | } | ||
527 | }; | ||
528 | |||
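A hedged sketch of driving "hmac(crc32)" through the standard ahash API (a real caller must also handle -EINPROGRESS/-EBUSY completions, since the alg is async):

	static int example_crc(const u8 *data, unsigned int len,
			       u8 result[CHKSUM_DIGEST_SIZE])
	{
		static const u8 seed[CHKSUM_DIGEST_SIZE] = { 0, 0, 0, 0 };
		struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(crc32)", 0, 0);
		struct ahash_request *req;
		struct scatterlist sg;
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		crypto_ahash_setkey(tfm, seed, sizeof(seed));

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}
		sg_init_one(&sg, data, len);
		ahash_request_set_crypt(req, &sg, result, len);
		err = crypto_ahash_digest(req);	/* may return -EINPROGRESS */

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return err;
	}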
529 | static void bfin_crypto_crc_done_task(unsigned long data) | ||
530 | { | ||
531 | struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data; | ||
532 | |||
533 | bfin_crypto_crc_handle_queue(crc, NULL); | ||
534 | } | ||
535 | |||
536 | static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id) | ||
537 | { | ||
538 | struct bfin_crypto_crc *crc = dev_id; | ||
539 | |||
540 | if (crc->regs->status & DCNTEXP) { | ||
541 | crc->regs->status = DCNTEXP; | ||
542 | SSYNC(); | ||
543 | |||
544 | /* prepare results */ | ||
545 | put_unaligned_le32(crc->regs->result, crc->req->result); | ||
546 | |||
547 | crc->regs->control &= ~BLKEN; | ||
548 | crc->busy = 0; | ||
549 | |||
550 | if (crc->req->base.complete) | ||
551 | crc->req->base.complete(&crc->req->base, 0); | ||
552 | |||
553 | tasklet_schedule(&crc->done_task); | ||
554 | |||
555 | return IRQ_HANDLED; | ||
556 | } else | ||
557 | return IRQ_NONE; | ||
558 | } | ||
559 | |||
560 | #ifdef CONFIG_PM | ||
561 | /** | ||
562 | * bfin_crypto_crc_suspend - suspend crc device | ||
563 | * @pdev: device being suspended | ||
564 | * @state: requested suspend state | ||
565 | */ | ||
566 | static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state) | ||
567 | { | ||
568 | struct bfin_crypto_crc *crc = platform_get_drvdata(pdev); | ||
569 | int i = 100000; | ||
570 | |||
571 | while ((crc->regs->control & BLKEN) && --i) | ||
572 | cpu_relax(); | ||
573 | |||
574 | if (i == 0) | ||
575 | return -EBUSY; | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | #else | ||
580 | # define bfin_crypto_crc_suspend NULL | ||
581 | #endif | ||
582 | |||
583 | #define bfin_crypto_crc_resume NULL | ||
584 | |||
585 | /** | ||
586 | * bfin_crypto_crc_probe - Initialize crc device | ||
587 | * | ||
588 | */ | ||
589 | static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev) | ||
590 | { | ||
591 | struct device *dev = &pdev->dev; | ||
592 | struct resource *res; | ||
593 | struct bfin_crypto_crc *crc; | ||
594 | unsigned int timeout = 100000; | ||
595 | int ret; | ||
596 | |||
597 | crc = kzalloc(sizeof(*crc), GFP_KERNEL); | ||
598 | if (!crc) { | ||
599 | dev_err(&pdev->dev, "failed to allocate bfin_crypto_crc\n"); | ||
600 | return -ENOMEM; | ||
601 | } | ||
602 | |||
603 | crc->dev = dev; | ||
604 | |||
605 | INIT_LIST_HEAD(&crc->list); | ||
606 | spin_lock_init(&crc->lock); | ||
607 | tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc); | ||
608 | crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH); | ||
609 | |||
610 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
611 | if (res == NULL) { | ||
612 | dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); | ||
613 | ret = -ENOENT; | ||
614 | goto out_error_free_mem; | ||
615 | } | ||
616 | |||
617 | crc->regs = ioremap(res->start, resource_size(res)); | ||
618 | if (!crc->regs) { | ||
619 | dev_err(&pdev->dev, "Cannot map CRC IO\n"); | ||
620 | ret = -ENXIO; | ||
621 | goto out_error_free_mem; | ||
622 | } | ||
623 | |||
624 | crc->irq = platform_get_irq(pdev, 0); | ||
625 | if (crc->irq < 0) { | ||
626 | dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n"); | ||
627 | ret = -ENOENT; | ||
628 | goto out_error_unmap; | ||
629 | } | ||
630 | |||
631 | ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc); | ||
632 | if (ret) { | ||
633 | dev_err(&pdev->dev, "Unable to request blackfin crc irq\n"); | ||
634 | goto out_error_unmap; | ||
635 | } | ||
636 | |||
637 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
638 | if (res == NULL) { | ||
639 | dev_err(&pdev->dev, "No CRC DMA channel specified\n"); | ||
640 | ret = -ENOENT; | ||
641 | goto out_error_irq; | ||
642 | } | ||
643 | crc->dma_ch = res->start; | ||
644 | |||
645 | ret = request_dma(crc->dma_ch, dev_name(dev)); | ||
646 | if (ret) { | ||
647 | dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n"); | ||
648 | goto out_error_irq; | ||
649 | } | ||
650 | |||
651 | crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL); | ||
652 | if (crc->sg_cpu == NULL) { | ||
653 | ret = -ENOMEM; | ||
654 | goto out_error_dma; | ||
655 | } | ||
656 | /* | ||
657 | * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle + | ||
658 | * 1 last + 1 next dma descriptors | ||
659 | */ | ||
660 | crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1)); | ||
661 | |||
662 | crc->regs->control = 0; | ||
663 | SSYNC(); | ||
664 | crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data; | ||
665 | SSYNC(); | ||
666 | |||
667 | while (!(crc->regs->status & LUTDONE) && (--timeout) > 0) | ||
668 | cpu_relax(); | ||
669 | |||
670 | if (timeout == 0) | ||
671 | dev_info(&pdev->dev, "init crc poly timeout\n"); | ||
672 | |||
673 | spin_lock(&crc_list.lock); | ||
674 | list_add(&crc->list, &crc_list.dev_list); | ||
675 | spin_unlock(&crc_list.lock); | ||
676 | |||
677 | platform_set_drvdata(pdev, crc); | ||
678 | |||
679 | ret = crypto_register_ahash(&algs); | ||
680 | if (ret) { | ||
681 | spin_lock(&crc_list.lock); | ||
682 | list_del(&crc->list); | ||
683 | spin_unlock(&crc_list.lock); | ||
684 | dev_err(&pdev->dev, "Can't register crypto ahash device\n"); | ||
685 | goto out_error_dma; | ||
686 | } | ||
687 | |||
688 | dev_info(&pdev->dev, "initialized\n"); | ||
689 | |||
690 | return 0; | ||
691 | |||
692 | out_error_dma: | ||
693 | if (crc->sg_cpu) | ||
694 | dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma); | ||
695 | free_dma(crc->dma_ch); | ||
696 | out_error_irq: | ||
697 | free_irq(crc->irq, crc->dev); | ||
698 | out_error_unmap: | ||
699 | iounmap((void *)crc->regs); | ||
700 | out_error_free_mem: | ||
701 | kfree(crc); | ||
702 | |||
703 | return ret; | ||
704 | } | ||
705 | |||
706 | /** | ||
707 | * bfin_crypto_crc_remove - Remove crc device | ||
708 | * | ||
709 | */ | ||
710 | static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev) | ||
711 | { | ||
712 | struct bfin_crypto_crc *crc = platform_get_drvdata(pdev); | ||
713 | |||
714 | if (!crc) | ||
715 | return -ENODEV; | ||
716 | |||
717 | spin_lock(&crc_list.lock); | ||
718 | list_del(&crc->list); | ||
719 | spin_unlock(&crc_list.lock); | ||
720 | |||
721 | crypto_unregister_ahash(&algs); | ||
722 | tasklet_kill(&crc->done_task); | ||
723 | iounmap((void *)crc->regs); | ||
724 | free_dma(crc->dma_ch); | ||
725 | if (crc->irq > 0) | ||
726 | free_irq(crc->irq, crc->dev); | ||
727 | kfree(crc); | ||
728 | |||
729 | return 0; | ||
730 | } | ||
731 | |||
732 | static struct platform_driver bfin_crypto_crc_driver = { | ||
733 | .probe = bfin_crypto_crc_probe, | ||
734 | .remove = __devexit_p(bfin_crypto_crc_remove), | ||
735 | .suspend = bfin_crypto_crc_suspend, | ||
736 | .resume = bfin_crypto_crc_resume, | ||
737 | .driver = { | ||
738 | .name = DRIVER_NAME, | ||
739 | .owner = THIS_MODULE, | ||
740 | }, | ||
741 | }; | ||
742 | |||
743 | /** | ||
744 | * bfin_crypto_crc_mod_init - Initialize module | ||
745 | * | ||
746 | * Checks the module params and registers the platform driver. | ||
747 | * Real work is in the platform probe function. | ||
748 | */ | ||
749 | static int __init bfin_crypto_crc_mod_init(void) | ||
750 | { | ||
751 | int ret; | ||
752 | |||
753 | pr_info("Blackfin hardware CRC crypto driver\n"); | ||
754 | |||
755 | INIT_LIST_HEAD(&crc_list.dev_list); | ||
756 | spin_lock_init(&crc_list.lock); | ||
757 | |||
758 | ret = platform_driver_register(&bfin_crypto_crc_driver); | ||
759 | if (ret) { | ||
760 | pr_err("unable to register driver\n"); | ||
761 | return ret; | ||
762 | } | ||
763 | |||
764 | return 0; | ||
765 | } | ||
766 | |||
767 | /** | ||
768 | * bfin_crypto_crc_mod_exit - Deinitialize module | ||
769 | */ | ||
770 | static void __exit bfin_crypto_crc_mod_exit(void) | ||
771 | { | ||
772 | platform_driver_unregister(&bfin_crypto_crc_driver); | ||
773 | } | ||
774 | |||
775 | module_init(bfin_crypto_crc_mod_init); | ||
776 | module_exit(bfin_crypto_crc_mod_exit); | ||
777 | |||
778 | MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); | ||
779 | MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver"); | ||
780 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 2d876bb98ff4..65c7668614ab 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
@@ -32,10 +32,13 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE | |||
32 | config CRYPTO_DEV_FSL_CAAM_INTC | 32 | config CRYPTO_DEV_FSL_CAAM_INTC |
33 | bool "Job Ring interrupt coalescing" | 33 | bool "Job Ring interrupt coalescing" |
34 | depends on CRYPTO_DEV_FSL_CAAM | 34 | depends on CRYPTO_DEV_FSL_CAAM |
35 | default y | 35 | default n |
36 | help | 36 | help |
37 | Enable the Job Ring's interrupt coalescing feature. | 37 | Enable the Job Ring's interrupt coalescing feature. |
38 | 38 | ||
39 | Note: the driver already provides adequate | ||
40 | interrupt coalescing in software. | ||
41 | |||
39 | config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD | 42 | config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD |
40 | int "Job Ring interrupt coalescing count threshold" | 43 | int "Job Ring interrupt coalescing count threshold" |
41 | depends on CRYPTO_DEV_FSL_CAAM_INTC | 44 | depends on CRYPTO_DEV_FSL_CAAM_INTC |
@@ -70,3 +73,28 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | |||
70 | 73 | ||
71 | To compile this as a module, choose M here: the module | 74 | To compile this as a module, choose M here: the module |
72 | will be called caamalg. | 75 | will be called caamalg. |
76 | |||
77 | config CRYPTO_DEV_FSL_CAAM_AHASH_API | ||
78 | tristate "Register hash algorithm implementations with Crypto API" | ||
79 | depends on CRYPTO_DEV_FSL_CAAM | ||
80 | default y | ||
81 | select CRYPTO_AHASH | ||
82 | help | ||
83 | Selecting this will offload ahash for users of the | ||
84 | scatterlist crypto API to the SEC4 via job ring. | ||
85 | |||
86 | To compile this as a module, choose M here: the module | ||
87 | will be called caamhash. | ||
88 | |||
89 | config CRYPTO_DEV_FSL_CAAM_RNG_API | ||
90 | tristate "Register caam device for hwrng API" | ||
91 | depends on CRYPTO_DEV_FSL_CAAM | ||
92 | default y | ||
93 | select CRYPTO_RNG | ||
94 | select HW_RANDOM | ||
95 | help | ||
96 | Selecting this will register the SEC4 hardware rng to | ||
97 | the hw_random API for supplying the kernel entropy pool. | ||
98 | |||
99 | To compile this as a module, choose M here: the module | ||
100 | will be called caamrng. | ||
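For reference, a .config fragment selecting the two new CAAM sub-options as modules might read:

	CONFIG_CRYPTO_DEV_FSL_CAAM=y
	CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API=m
	CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API=m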
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index ef39011b4505..b1eb44838db5 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile | |||
@@ -4,5 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o | 5 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
6 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o | 6 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
7 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | ||
8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | ||
7 | 9 | ||
8 | caam-objs := ctrl.o jr.o error.o | 10 | caam-objs := ctrl.o jr.o error.o key_gen.o |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 4eec389184d3..0c1ea8492eff 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -37,9 +37,10 @@ | |||
37 | * | ShareDesc Pointer | | 37 | * | ShareDesc Pointer | |
38 | * | SEQ_OUT_PTR | | 38 | * | SEQ_OUT_PTR | |
39 | * | (output buffer) | | 39 | * | (output buffer) | |
40 | * | (output length) | | ||
40 | * | SEQ_IN_PTR | | 41 | * | SEQ_IN_PTR | |
41 | * | (input buffer) | | 42 | * | (input buffer) | |
42 | * | LOAD (to DECO) | | 43 | * | (input length) | |
43 | * --------------------- | 44 | * --------------------- |
44 | */ | 45 | */ |
45 | 46 | ||
@@ -50,6 +51,8 @@ | |||
50 | #include "desc_constr.h" | 51 | #include "desc_constr.h" |
51 | #include "jr.h" | 52 | #include "jr.h" |
52 | #include "error.h" | 53 | #include "error.h" |
54 | #include "sg_sw_sec4.h" | ||
55 | #include "key_gen.h" | ||
53 | 56 | ||
54 | /* | 57 | /* |
55 | * crypto alg | 58 | * crypto alg |
@@ -62,7 +65,7 @@ | |||
62 | #define CAAM_MAX_IV_LENGTH 16 | 65 | #define CAAM_MAX_IV_LENGTH 16 |
63 | 66 | ||
64 | /* length of descriptors text */ | 67 | /* length of descriptors text */ |
65 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3) | 68 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) |
66 | 69 | ||
67 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) | 70 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) |
68 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) | 71 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) |
@@ -143,11 +146,11 @@ static inline void aead_append_ld_iv(u32 *desc, int ivsize) | |||
143 | */ | 146 | */ |
144 | static inline void ablkcipher_append_src_dst(u32 *desc) | 147 | static inline void ablkcipher_append_src_dst(u32 *desc) |
145 | { | 148 | { |
146 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ | 149 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
147 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ | 150 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
148 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | \ | 151 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | |
149 | KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); \ | 152 | KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); |
150 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); \ | 153 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); |
151 | } | 154 | } |
152 | 155 | ||
153 | /* | 156 | /* |
@@ -452,121 +455,12 @@ static int aead_setauthsize(struct crypto_aead *authenc, | |||
452 | return 0; | 455 | return 0; |
453 | } | 456 | } |
454 | 457 | ||
455 | struct split_key_result { | 458 | static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, |
456 | struct completion completion; | 459 | u32 authkeylen) |
457 | int err; | ||
458 | }; | ||
459 | |||
460 | static void split_key_done(struct device *dev, u32 *desc, u32 err, | ||
461 | void *context) | ||
462 | { | 460 | { |
463 | struct split_key_result *res = context; | 461 | return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, |
464 | 462 | ctx->split_key_pad_len, key_in, authkeylen, | |
465 | #ifdef DEBUG | 463 | ctx->alg_op); |
466 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
467 | #endif | ||
468 | |||
469 | if (err) { | ||
470 | char tmp[CAAM_ERROR_STR_MAX]; | ||
471 | |||
472 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
473 | } | ||
474 | |||
475 | res->err = err; | ||
476 | |||
477 | complete(&res->completion); | ||
478 | } | ||
479 | |||
480 | /* | ||
481 | get a split ipad/opad key | ||
482 | |||
483 | Split key generation----------------------------------------------- | ||
484 | |||
485 | [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 | ||
486 | [01] 0x04000014 key: class2->keyreg len=20 | ||
487 | @0xffe01000 | ||
488 | [03] 0x84410014 operation: cls2-op sha1 hmac init dec | ||
489 | [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm | ||
490 | [05] 0xa4000001 jump: class2 local all ->1 [06] | ||
491 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | ||
492 | @0xffe04000 | ||
493 | */ | ||
494 | static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) | ||
495 | { | ||
496 | struct device *jrdev = ctx->jrdev; | ||
497 | u32 *desc; | ||
498 | struct split_key_result result; | ||
499 | dma_addr_t dma_addr_in, dma_addr_out; | ||
500 | int ret = 0; | ||
501 | |||
502 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | ||
503 | |||
504 | init_job_desc(desc, 0); | ||
505 | |||
506 | dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen, | ||
507 | DMA_TO_DEVICE); | ||
508 | if (dma_mapping_error(jrdev, dma_addr_in)) { | ||
509 | dev_err(jrdev, "unable to map key input memory\n"); | ||
510 | kfree(desc); | ||
511 | return -ENOMEM; | ||
512 | } | ||
513 | append_key(desc, dma_addr_in, authkeylen, CLASS_2 | | ||
514 | KEY_DEST_CLASS_REG); | ||
515 | |||
516 | /* Sets MDHA up into an HMAC-INIT */ | ||
517 | append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT | | ||
518 | OP_ALG_AS_INIT); | ||
519 | |||
520 | /* | ||
521 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion | ||
522 | into both pads inside MDHA | ||
523 | */ | ||
524 | append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | | ||
525 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); | ||
526 | |||
527 | /* | ||
528 | * FIFO_STORE with the explicit split-key content store | ||
529 | * (0x26 output type) | ||
530 | */ | ||
531 | dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | ||
532 | DMA_FROM_DEVICE); | ||
533 | if (dma_mapping_error(jrdev, dma_addr_out)) { | ||
534 | dev_err(jrdev, "unable to map key output memory\n"); | ||
535 | kfree(desc); | ||
536 | return -ENOMEM; | ||
537 | } | ||
538 | append_fifo_store(desc, dma_addr_out, ctx->split_key_len, | ||
539 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | ||
540 | |||
541 | #ifdef DEBUG | ||
542 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
543 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1); | ||
544 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
545 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
546 | #endif | ||
547 | |||
548 | result.err = 0; | ||
549 | init_completion(&result.completion); | ||
550 | |||
551 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | ||
552 | if (!ret) { | ||
553 | /* in progress */ | ||
554 | wait_for_completion_interruptible(&result.completion); | ||
555 | ret = result.err; | ||
556 | #ifdef DEBUG | ||
557 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
558 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | ||
559 | ctx->split_key_pad_len, 1); | ||
560 | #endif | ||
561 | } | ||
562 | |||
563 | dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len, | ||
564 | DMA_FROM_DEVICE); | ||
565 | dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE); | ||
566 | |||
567 | kfree(desc); | ||
568 | |||
569 | return ret; | ||
570 | } | 464 | } |
571 | 465 | ||
572 | static int aead_setkey(struct crypto_aead *aead, | 466 | static int aead_setkey(struct crypto_aead *aead, |
@@ -610,7 +504,7 @@ static int aead_setkey(struct crypto_aead *aead, | |||
610 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 504 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
611 | #endif | 505 | #endif |
612 | 506 | ||
613 | ret = gen_split_key(ctx, key, authkeylen); | 507 | ret = gen_split_aead_key(ctx, key, authkeylen); |
614 | if (ret) { | 508 | if (ret) { |
615 | goto badkey; | 509 | goto badkey; |
616 | } | 510 | } |
@@ -757,72 +651,78 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
757 | return ret; | 651 | return ret; |
758 | } | 652 | } |
759 | 653 | ||
760 | struct link_tbl_entry { | ||
761 | u64 ptr; | ||
762 | u32 len; | ||
763 | u8 reserved; | ||
764 | u8 buf_pool_id; | ||
765 | u16 offset; | ||
766 | }; | ||
767 | |||
768 | /* | 654 | /* |
769 | * aead_edesc - s/w-extended aead descriptor | 655 | * aead_edesc - s/w-extended aead descriptor |
770 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist | 656 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist |
657 | * @assoc_chained: if associated data is chained | ||
771 | * @src_nents: number of segments in input scatterlist | 658 | * @src_nents: number of segments in input scatterlist |
659 | * @src_chained: if source is chained | ||
772 | * @dst_nents: number of segments in output scatterlist | 660 | * @dst_nents: number of segments in output scatterlist |
661 | * @dst_chained: if destination is chained | ||
773 | * @iv_dma: dma address of iv for checking continuity and link table | 662 | * @iv_dma: dma address of iv for checking continuity and link table |
774 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | 663 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) |
775 | * @link_tbl_bytes: length of dma mapped link_tbl space | 664 | * @sec4_sg_bytes: length of dma mapped sec4_sg space |
776 | * @link_tbl_dma: bus physical mapped address of h/w link table | 665 | * @sec4_sg_dma: bus physical mapped address of h/w link table |
777 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | 666 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
778 | */ | 667 | */ |
779 | struct aead_edesc { | 668 | struct aead_edesc { |
780 | int assoc_nents; | 669 | int assoc_nents; |
670 | bool assoc_chained; | ||
781 | int src_nents; | 671 | int src_nents; |
672 | bool src_chained; | ||
782 | int dst_nents; | 673 | int dst_nents; |
674 | bool dst_chained; | ||
783 | dma_addr_t iv_dma; | 675 | dma_addr_t iv_dma; |
784 | int link_tbl_bytes; | 676 | int sec4_sg_bytes; |
785 | dma_addr_t link_tbl_dma; | 677 | dma_addr_t sec4_sg_dma; |
786 | struct link_tbl_entry *link_tbl; | 678 | struct sec4_sg_entry *sec4_sg; |
787 | u32 hw_desc[0]; | 679 | u32 hw_desc[0]; |
788 | }; | 680 | }; |
789 | 681 | ||
790 | /* | 682 | /* |
791 | * ablkcipher_edesc - s/w-extended ablkcipher descriptor | 683 | * ablkcipher_edesc - s/w-extended ablkcipher descriptor |
792 | * @src_nents: number of segments in input scatterlist | 684 | * @src_nents: number of segments in input scatterlist |
685 | * @src_chained: if source is chained | ||
793 | * @dst_nents: number of segments in output scatterlist | 686 | * @dst_nents: number of segments in output scatterlist |
687 | * @dst_chained: if destination is chained | ||
794 | * @iv_dma: dma address of iv for checking continuity and link table | 688 | * @iv_dma: dma address of iv for checking continuity and link table |
795 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | 689 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) |
796 | * @link_tbl_bytes: length of dma mapped link_tbl space | 690 | * @sec4_sg_bytes: length of dma mapped sec4_sg space |
797 | * @link_tbl_dma: bus physical mapped address of h/w link table | 691 | * @sec4_sg_dma: bus physical mapped address of h/w link table |
798 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | 692 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
799 | */ | 693 | */ |
800 | struct ablkcipher_edesc { | 694 | struct ablkcipher_edesc { |
801 | int src_nents; | 695 | int src_nents; |
696 | bool src_chained; | ||
802 | int dst_nents; | 697 | int dst_nents; |
698 | bool dst_chained; | ||
803 | dma_addr_t iv_dma; | 699 | dma_addr_t iv_dma; |
804 | int link_tbl_bytes; | 700 | int sec4_sg_bytes; |
805 | dma_addr_t link_tbl_dma; | 701 | dma_addr_t sec4_sg_dma; |
806 | struct link_tbl_entry *link_tbl; | 702 | struct sec4_sg_entry *sec4_sg; |
807 | u32 hw_desc[0]; | 703 | u32 hw_desc[0]; |
808 | }; | 704 | }; |
809 | 705 | ||
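/*
 * Layout note (illustrative): each edesc is a single allocation in which
 * the variable-length h/w job descriptor follows the struct and the
 * sec4_sg link table follows the descriptor, exactly as the
 * *_edesc_alloc() functions below compute:
 *
 *	edesc --> [ struct *_edesc | hw_desc[] (desc_bytes) | sec4_sg[] ]
 *
 *	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desc_bytes;
 */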
810 | static void caam_unmap(struct device *dev, struct scatterlist *src, | 706 | static void caam_unmap(struct device *dev, struct scatterlist *src, |
811 | struct scatterlist *dst, int src_nents, int dst_nents, | 707 | struct scatterlist *dst, int src_nents, |
812 | dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma, | 708 | bool src_chained, int dst_nents, bool dst_chained, |
813 | int link_tbl_bytes) | 709 | dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, |
710 | int sec4_sg_bytes) | ||
814 | { | 711 | { |
815 | if (unlikely(dst != src)) { | 712 | if (dst != src) { |
816 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); | 713 | dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE, |
817 | dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); | 714 | src_chained); |
715 | dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE, | ||
716 | dst_chained); | ||
818 | } else { | 717 | } else { |
819 | dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); | 718 | dma_unmap_sg_chained(dev, src, src_nents ? : 1, |
719 | DMA_BIDIRECTIONAL, src_chained); | ||
820 | } | 720 | } |
821 | 721 | ||
822 | if (iv_dma) | 722 | if (iv_dma) |
823 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 723 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); |
824 | if (link_tbl_bytes) | 724 | if (sec4_sg_bytes) |
825 | dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes, | 725 | dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, |
826 | DMA_TO_DEVICE); | 726 | DMA_TO_DEVICE); |
827 | } | 727 | } |
828 | 728 | ||
@@ -833,12 +733,13 @@ static void aead_unmap(struct device *dev, | |||
833 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 733 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
834 | int ivsize = crypto_aead_ivsize(aead); | 734 | int ivsize = crypto_aead_ivsize(aead); |
835 | 735 | ||
836 | dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE); | 736 | dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents, |
737 | DMA_TO_DEVICE, edesc->assoc_chained); | ||
837 | 738 | ||
838 | caam_unmap(dev, req->src, req->dst, | 739 | caam_unmap(dev, req->src, req->dst, |
839 | edesc->src_nents, edesc->dst_nents, | 740 | edesc->src_nents, edesc->src_chained, edesc->dst_nents, |
840 | edesc->iv_dma, ivsize, edesc->link_tbl_dma, | 741 | edesc->dst_chained, edesc->iv_dma, ivsize, |
841 | edesc->link_tbl_bytes); | 742 | edesc->sec4_sg_dma, edesc->sec4_sg_bytes); |
842 | } | 743 | } |
843 | 744 | ||
844 | static void ablkcipher_unmap(struct device *dev, | 745 | static void ablkcipher_unmap(struct device *dev, |
@@ -849,9 +750,9 @@ static void ablkcipher_unmap(struct device *dev, | |||
849 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | 750 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); |
850 | 751 | ||
851 | caam_unmap(dev, req->src, req->dst, | 752 | caam_unmap(dev, req->src, req->dst, |
852 | edesc->src_nents, edesc->dst_nents, | 753 | edesc->src_nents, edesc->src_chained, edesc->dst_nents, |
853 | edesc->iv_dma, ivsize, edesc->link_tbl_dma, | 754 | edesc->dst_chained, edesc->iv_dma, ivsize, |
854 | edesc->link_tbl_bytes); | 755 | edesc->sec4_sg_dma, edesc->sec4_sg_bytes); |
855 | } | 756 | } |
856 | 757 | ||
857 | static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | 758 | static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, |
@@ -942,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
942 | sizeof(struct iphdr) + req->assoclen + | 843 | sizeof(struct iphdr) + req->assoclen + |
943 | ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + | 844 | ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + |
944 | ctx->authsize + 36, 1); | 845 | ctx->authsize + 36, 1); |
945 | if (!err && edesc->link_tbl_bytes) { | 846 | if (!err && edesc->sec4_sg_bytes) { |
946 | struct scatterlist *sg = sg_last(req->src, edesc->src_nents); | 847 | struct scatterlist *sg = sg_last(req->src, edesc->src_nents); |
947 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", | 848 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", |
948 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), | 849 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), |
@@ -1026,50 +927,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
1026 | ablkcipher_request_complete(req, err); | 927 | ablkcipher_request_complete(req, err); |
1027 | } | 928 | } |
1028 | 929 | ||
1029 | static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr, | ||
1030 | dma_addr_t dma, u32 len, u32 offset) | ||
1031 | { | ||
1032 | link_tbl_ptr->ptr = dma; | ||
1033 | link_tbl_ptr->len = len; | ||
1034 | link_tbl_ptr->reserved = 0; | ||
1035 | link_tbl_ptr->buf_pool_id = 0; | ||
1036 | link_tbl_ptr->offset = offset; | ||
1037 | #ifdef DEBUG | ||
1038 | print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ", | ||
1039 | DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr, | ||
1040 | sizeof(struct link_tbl_entry), 1); | ||
1041 | #endif | ||
1042 | } | ||
1043 | |||
1044 | /* | ||
1045 | * convert scatterlist to h/w link table format | ||
1046 | * but does not have final bit; instead, returns last entry | ||
1047 | */ | ||
1048 | static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg, | ||
1049 | int sg_count, struct link_tbl_entry | ||
1050 | *link_tbl_ptr, u32 offset) | ||
1051 | { | ||
1052 | while (sg_count) { | ||
1053 | sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg), | ||
1054 | sg_dma_len(sg), offset); | ||
1055 | link_tbl_ptr++; | ||
1056 | sg = sg_next(sg); | ||
1057 | sg_count--; | ||
1058 | } | ||
1059 | return link_tbl_ptr - 1; | ||
1060 | } | ||
1061 | |||
1062 | /* | ||
1063 | * convert scatterlist to h/w link table format | ||
1064 | * scatterlist must have been previously dma mapped | ||
1065 | */ | ||
1066 | static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count, | ||
1067 | struct link_tbl_entry *link_tbl_ptr, u32 offset) | ||
1068 | { | ||
1069 | link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset); | ||
1070 | link_tbl_ptr->len |= 0x40000000; | ||
1071 | } | ||
1072 | |||
1073 | /* | 930 | /* |
1074 | * Fill in aead job descriptor | 931 | * Fill in aead job descriptor |
1075 | */ | 932 | */ |
@@ -1085,7 +942,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
1085 | u32 *desc = edesc->hw_desc; | 942 | u32 *desc = edesc->hw_desc; |
1086 | u32 out_options = 0, in_options; | 943 | u32 out_options = 0, in_options; |
1087 | dma_addr_t dst_dma, src_dma; | 944 | dma_addr_t dst_dma, src_dma; |
1088 | int len, link_tbl_index = 0; | 945 | int len, sec4_sg_index = 0; |
1089 | 946 | ||
1090 | #ifdef DEBUG | 947 | #ifdef DEBUG |
1091 | debug("assoclen %d cryptlen %d authsize %d\n", | 948 | debug("assoclen %d cryptlen %d authsize %d\n", |
@@ -1111,9 +968,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
1111 | src_dma = sg_dma_address(req->assoc); | 968 | src_dma = sg_dma_address(req->assoc); |
1112 | in_options = 0; | 969 | in_options = 0; |
1113 | } else { | 970 | } else { |
1114 | src_dma = edesc->link_tbl_dma; | 971 | src_dma = edesc->sec4_sg_dma; |
1115 | link_tbl_index += (edesc->assoc_nents ? : 1) + 1 + | 972 | sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + |
1116 | (edesc->src_nents ? : 1); | 973 | (edesc->src_nents ? : 1); |
1117 | in_options = LDST_SGF; | 974 | in_options = LDST_SGF; |
1118 | } | 975 | } |
1119 | if (encrypt) | 976 | if (encrypt) |
@@ -1127,7 +984,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
1127 | if (all_contig) { | 984 | if (all_contig) { |
1128 | dst_dma = sg_dma_address(req->src); | 985 | dst_dma = sg_dma_address(req->src); |
1129 | } else { | 986 | } else { |
1130 | dst_dma = src_dma + sizeof(struct link_tbl_entry) * | 987 | dst_dma = src_dma + sizeof(struct sec4_sg_entry) * |
1131 | ((edesc->assoc_nents ? : 1) + 1); | 988 | ((edesc->assoc_nents ? : 1) + 1); |
1132 | out_options = LDST_SGF; | 989 | out_options = LDST_SGF; |
1133 | } | 990 | } |
@@ -1135,9 +992,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
1135 | if (!edesc->dst_nents) { | 992 | if (!edesc->dst_nents) { |
1136 | dst_dma = sg_dma_address(req->dst); | 993 | dst_dma = sg_dma_address(req->dst); |
1137 | } else { | 994 | } else { |
1138 | dst_dma = edesc->link_tbl_dma + | 995 | dst_dma = edesc->sec4_sg_dma + |
1139 | link_tbl_index * | 996 | sec4_sg_index * |
1140 | sizeof(struct link_tbl_entry); | 997 | sizeof(struct sec4_sg_entry); |
1141 | out_options = LDST_SGF; | 998 | out_options = LDST_SGF; |
1142 | } | 999 | } |
1143 | } | 1000 | } |
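	/*
	 * Resulting sec4_sg layout in the non-contiguous case, per the
	 * allocation code below:
	 *	[ assoc entries | iv | src entries | dst entries ]
	 * sec4_sg_index computed above is the offset of the dst entries.
	 */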
@@ -1163,7 +1020,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
1163 | u32 *desc = edesc->hw_desc; | 1020 | u32 *desc = edesc->hw_desc; |
1164 | u32 out_options = 0, in_options; | 1021 | u32 out_options = 0, in_options; |
1165 | dma_addr_t dst_dma, src_dma; | 1022 | dma_addr_t dst_dma, src_dma; |
1166 | int len, link_tbl_index = 0; | 1023 | int len, sec4_sg_index = 0; |
1167 | 1024 | ||
1168 | #ifdef DEBUG | 1025 | #ifdef DEBUG |
1169 | debug("assoclen %d cryptlen %d authsize %d\n", | 1026 | debug("assoclen %d cryptlen %d authsize %d\n", |
@@ -1188,8 +1045,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
1188 | src_dma = sg_dma_address(req->assoc); | 1045 | src_dma = sg_dma_address(req->assoc); |
1189 | in_options = 0; | 1046 | in_options = 0; |
1190 | } else { | 1047 | } else { |
1191 | src_dma = edesc->link_tbl_dma; | 1048 | src_dma = edesc->sec4_sg_dma; |
1192 | link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents; | 1049 | sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; |
1193 | in_options = LDST_SGF; | 1050 | in_options = LDST_SGF; |
1194 | } | 1051 | } |
1195 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + | 1052 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + |
@@ -1199,13 +1056,13 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
1199 | dst_dma = edesc->iv_dma; | 1056 | dst_dma = edesc->iv_dma; |
1200 | } else { | 1057 | } else { |
1201 | if (likely(req->src == req->dst)) { | 1058 | if (likely(req->src == req->dst)) { |
1202 | dst_dma = src_dma + sizeof(struct link_tbl_entry) * | 1059 | dst_dma = src_dma + sizeof(struct sec4_sg_entry) * |
1203 | edesc->assoc_nents; | 1060 | edesc->assoc_nents; |
1204 | out_options = LDST_SGF; | 1061 | out_options = LDST_SGF; |
1205 | } else { | 1062 | } else { |
1206 | dst_dma = edesc->link_tbl_dma + | 1063 | dst_dma = edesc->sec4_sg_dma + |
1207 | link_tbl_index * | 1064 | sec4_sg_index * |
1208 | sizeof(struct link_tbl_entry); | 1065 | sizeof(struct sec4_sg_entry); |
1209 | out_options = LDST_SGF; | 1066 | out_options = LDST_SGF; |
1210 | } | 1067 | } |
1211 | } | 1068 | } |
@@ -1226,7 +1083,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
1226 | u32 *desc = edesc->hw_desc; | 1083 | u32 *desc = edesc->hw_desc; |
1227 | u32 out_options = 0, in_options; | 1084 | u32 out_options = 0, in_options; |
1228 | dma_addr_t dst_dma, src_dma; | 1085 | dma_addr_t dst_dma, src_dma; |
1229 | int len, link_tbl_index = 0; | 1086 | int len, sec4_sg_index = 0; |
1230 | 1087 | ||
1231 | #ifdef DEBUG | 1088 | #ifdef DEBUG |
1232 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | 1089 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", |
@@ -1244,8 +1101,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
1244 | src_dma = edesc->iv_dma; | 1101 | src_dma = edesc->iv_dma; |
1245 | in_options = 0; | 1102 | in_options = 0; |
1246 | } else { | 1103 | } else { |
1247 | src_dma = edesc->link_tbl_dma; | 1104 | src_dma = edesc->sec4_sg_dma; |
1248 | link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents; | 1105 | sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents; |
1249 | in_options = LDST_SGF; | 1106 | in_options = LDST_SGF; |
1250 | } | 1107 | } |
1251 | append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); | 1108 | append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); |
@@ -1254,16 +1111,16 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
1254 | if (!edesc->src_nents && iv_contig) { | 1111 | if (!edesc->src_nents && iv_contig) { |
1255 | dst_dma = sg_dma_address(req->src); | 1112 | dst_dma = sg_dma_address(req->src); |
1256 | } else { | 1113 | } else { |
1257 | dst_dma = edesc->link_tbl_dma + | 1114 | dst_dma = edesc->sec4_sg_dma + |
1258 | sizeof(struct link_tbl_entry); | 1115 | sizeof(struct sec4_sg_entry); |
1259 | out_options = LDST_SGF; | 1116 | out_options = LDST_SGF; |
1260 | } | 1117 | } |
1261 | } else { | 1118 | } else { |
1262 | if (!edesc->dst_nents) { | 1119 | if (!edesc->dst_nents) { |
1263 | dst_dma = sg_dma_address(req->dst); | 1120 | dst_dma = sg_dma_address(req->dst); |
1264 | } else { | 1121 | } else { |
1265 | dst_dma = edesc->link_tbl_dma + | 1122 | dst_dma = edesc->sec4_sg_dma + |
1266 | link_tbl_index * sizeof(struct link_tbl_entry); | 1123 | sec4_sg_index * sizeof(struct sec4_sg_entry); |
1267 | out_options = LDST_SGF; | 1124 | out_options = LDST_SGF; |
1268 | } | 1125 | } |
1269 | } | 1126 | } |
@@ -1271,28 +1128,6 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
1271 | } | 1128 | } |
1272 | 1129 | ||
1273 | /* | 1130 | /* |
1274 | * derive number of elements in scatterlist | ||
1275 | */ | ||
1276 | static int sg_count(struct scatterlist *sg_list, int nbytes) | ||
1277 | { | ||
1278 | struct scatterlist *sg = sg_list; | ||
1279 | int sg_nents = 0; | ||
1280 | |||
1281 | while (nbytes > 0) { | ||
1282 | sg_nents++; | ||
1283 | nbytes -= sg->length; | ||
1284 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | ||
1285 | BUG(); /* Chaining not supported */ | ||
1286 | sg = scatterwalk_sg_next(sg); | ||
1287 | } | ||
1288 | |||
1289 | if (likely(sg_nents == 1)) | ||
1290 | return 0; | ||
1291 | |||
1292 | return sg_nents; | ||
1293 | } | ||
1294 | |||
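/*
 * A sketch (an assumption; the replacement presumably lives in
 * sg_sw_sec4.h) of the chained-aware variant that supersedes the removed
 * helper: the same walk, but a chained scatterlist is reported back to
 * the caller instead of hitting BUG():
 *
 *	static inline int sg_count(struct scatterlist *sg_list, int nbytes,
 *				   bool *chained)
 *	{
 *		struct scatterlist *sg = sg_list;
 *		int sg_nents = 0;
 *
 *		while (nbytes > 0) {
 *			sg_nents++;
 *			nbytes -= sg->length;
 *			if (!sg_is_last(sg) && (sg + 1)->length == 0)
 *				*chained = true;
 *			sg = scatterwalk_sg_next(sg);
 *		}
 *
 *		if (likely(sg_nents == 1))
 *			return 0;
 *		return sg_nents;
 *	}
 */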
1295 | /* | ||
1296 | * allocate and map the aead extended descriptor | 1131 | * allocate and map the aead extended descriptor |
1297 | */ | 1132 | */ |
1298 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | 1133 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, |
@@ -1308,25 +1143,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1308 | dma_addr_t iv_dma = 0; | 1143 | dma_addr_t iv_dma = 0; |
1309 | int sgc; | 1144 | int sgc; |
1310 | bool all_contig = true; | 1145 | bool all_contig = true; |
1146 | bool assoc_chained = false, src_chained = false, dst_chained = false; | ||
1311 | int ivsize = crypto_aead_ivsize(aead); | 1147 | int ivsize = crypto_aead_ivsize(aead); |
1312 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; | 1148 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; |
1313 | 1149 | ||
1314 | assoc_nents = sg_count(req->assoc, req->assoclen); | 1150 | assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); |
1315 | src_nents = sg_count(req->src, req->cryptlen); | 1151 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); |
1316 | 1152 | ||
1317 | if (unlikely(req->dst != req->src)) | 1153 | if (unlikely(req->dst != req->src)) |
1318 | dst_nents = sg_count(req->dst, req->cryptlen); | 1154 | dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); |
1319 | 1155 | ||
1320 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, | 1156 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, |
1321 | DMA_BIDIRECTIONAL); | 1157 | DMA_BIDIRECTIONAL, assoc_chained); |
1322 | if (likely(req->src == req->dst)) { | 1158 | if (likely(req->src == req->dst)) { |
1323 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1159 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
1324 | DMA_BIDIRECTIONAL); | 1160 | DMA_BIDIRECTIONAL, src_chained); |
1325 | } else { | 1161 | } else { |
1326 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1162 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
1327 | DMA_TO_DEVICE); | 1163 | DMA_TO_DEVICE, src_chained); |
1328 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | 1164 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, |
1329 | DMA_FROM_DEVICE); | 1165 | DMA_FROM_DEVICE, dst_chained); |
1330 | } | 1166 | } |
1331 | 1167 | ||
1332 | /* Check if data are contiguous */ | 1168 | /* Check if data are contiguous */ |
@@ -1337,50 +1173,53 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1337 | all_contig = false; | 1173 | all_contig = false; |
1338 | assoc_nents = assoc_nents ? : 1; | 1174 | assoc_nents = assoc_nents ? : 1; |
1339 | src_nents = src_nents ? : 1; | 1175 | src_nents = src_nents ? : 1; |
1340 | link_tbl_len = assoc_nents + 1 + src_nents; | 1176 | sec4_sg_len = assoc_nents + 1 + src_nents; |
1341 | } | 1177 | } |
1342 | link_tbl_len += dst_nents; | 1178 | sec4_sg_len += dst_nents; |
1343 | 1179 | ||
1344 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | 1180 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
1345 | 1181 | ||
1346 | /* allocate space for base edesc and hw desc commands, link tables */ | 1182 | /* allocate space for base edesc and hw desc commands, link tables */ |
1347 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | 1183 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + |
1348 | link_tbl_bytes, GFP_DMA | flags); | 1184 | sec4_sg_bytes, GFP_DMA | flags); |
1349 | if (!edesc) { | 1185 | if (!edesc) { |
1350 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1186 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
1351 | return ERR_PTR(-ENOMEM); | 1187 | return ERR_PTR(-ENOMEM); |
1352 | } | 1188 | } |
1353 | 1189 | ||
1354 | edesc->assoc_nents = assoc_nents; | 1190 | edesc->assoc_nents = assoc_nents; |
1191 | edesc->assoc_chained = assoc_chained; | ||
1355 | edesc->src_nents = src_nents; | 1192 | edesc->src_nents = src_nents; |
1193 | edesc->src_chained = src_chained; | ||
1356 | edesc->dst_nents = dst_nents; | 1194 | edesc->dst_nents = dst_nents; |
1195 | edesc->dst_chained = dst_chained; | ||
1357 | edesc->iv_dma = iv_dma; | 1196 | edesc->iv_dma = iv_dma; |
1358 | edesc->link_tbl_bytes = link_tbl_bytes; | 1197 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1359 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | 1198 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + |
1360 | desc_bytes; | 1199 | desc_bytes; |
1361 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | 1200 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
1362 | link_tbl_bytes, DMA_TO_DEVICE); | 1201 | sec4_sg_bytes, DMA_TO_DEVICE); |
1363 | *all_contig_ptr = all_contig; | 1202 | *all_contig_ptr = all_contig; |
1364 | 1203 | ||
1365 | link_tbl_index = 0; | 1204 | sec4_sg_index = 0; |
1366 | if (!all_contig) { | 1205 | if (!all_contig) { |
1367 | sg_to_link_tbl(req->assoc, | 1206 | sg_to_sec4_sg(req->assoc, |
1368 | (assoc_nents ? : 1), | 1207 | (assoc_nents ? : 1), |
1369 | edesc->link_tbl + | 1208 | edesc->sec4_sg + |
1370 | link_tbl_index, 0); | 1209 | sec4_sg_index, 0); |
1371 | link_tbl_index += assoc_nents ? : 1; | 1210 | sec4_sg_index += assoc_nents ? : 1; |
1372 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | 1211 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, |
1373 | iv_dma, ivsize, 0); | 1212 | iv_dma, ivsize, 0); |
1374 | link_tbl_index += 1; | 1213 | sec4_sg_index += 1; |
1375 | sg_to_link_tbl_last(req->src, | 1214 | sg_to_sec4_sg_last(req->src, |
1376 | (src_nents ? : 1), | 1215 | (src_nents ? : 1), |
1377 | edesc->link_tbl + | 1216 | edesc->sec4_sg + |
1378 | link_tbl_index, 0); | 1217 | sec4_sg_index, 0); |
1379 | link_tbl_index += src_nents ? : 1; | 1218 | sec4_sg_index += src_nents ? : 1; |
1380 | } | 1219 | } |
1381 | if (dst_nents) { | 1220 | if (dst_nents) { |
1382 | sg_to_link_tbl_last(req->dst, dst_nents, | 1221 | sg_to_sec4_sg_last(req->dst, dst_nents, |
1383 | edesc->link_tbl + link_tbl_index, 0); | 1222 | edesc->sec4_sg + sec4_sg_index, 0); |
1384 | } | 1223 | } |
1385 | 1224 | ||
1386 | return edesc; | 1225 | return edesc; |
@@ -1487,24 +1326,25 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
1487 | int sgc; | 1326 | int sgc; |
1488 | u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; | 1327 | u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; |
1489 | int ivsize = crypto_aead_ivsize(aead); | 1328 | int ivsize = crypto_aead_ivsize(aead); |
1490 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; | 1329 | bool assoc_chained = false, src_chained = false, dst_chained = false; |
1330 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | ||
1491 | 1331 | ||
1492 | assoc_nents = sg_count(req->assoc, req->assoclen); | 1332 | assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); |
1493 | src_nents = sg_count(req->src, req->cryptlen); | 1333 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); |
1494 | 1334 | ||
1495 | if (unlikely(req->dst != req->src)) | 1335 | if (unlikely(req->dst != req->src)) |
1496 | dst_nents = sg_count(req->dst, req->cryptlen); | 1336 | dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); |
1497 | 1337 | ||
1498 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, | 1338 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, |
1499 | DMA_BIDIRECTIONAL); | 1339 | DMA_BIDIRECTIONAL, assoc_chained); |
1500 | if (likely(req->src == req->dst)) { | 1340 | if (likely(req->src == req->dst)) { |
1501 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1341 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
1502 | DMA_BIDIRECTIONAL); | 1342 | DMA_BIDIRECTIONAL, src_chained); |
1503 | } else { | 1343 | } else { |
1504 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1344 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
1505 | DMA_TO_DEVICE); | 1345 | DMA_TO_DEVICE, src_chained); |
1506 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | 1346 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, |
1507 | DMA_FROM_DEVICE); | 1347 | DMA_FROM_DEVICE, dst_chained); |
1508 | } | 1348 | } |
1509 | 1349 | ||
1510 | /* Check if data are contiguous */ | 1350 | /* Check if data are contiguous */ |
@@ -1516,58 +1356,61 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
1516 | contig &= ~GIV_DST_CONTIG; | 1356 | contig &= ~GIV_DST_CONTIG; |
1517 | if (unlikely(req->src != req->dst)) { | 1357 | if (unlikely(req->src != req->dst)) { |
1518 | dst_nents = dst_nents ? : 1; | 1358 | dst_nents = dst_nents ? : 1; |
1519 | link_tbl_len += 1; | 1359 | sec4_sg_len += 1; |
1520 | } | 1360 | } |
1521 | if (!(contig & GIV_SRC_CONTIG)) { | 1361 | if (!(contig & GIV_SRC_CONTIG)) { |
1522 | assoc_nents = assoc_nents ? : 1; | 1362 | assoc_nents = assoc_nents ? : 1; |
1523 | src_nents = src_nents ? : 1; | 1363 | src_nents = src_nents ? : 1; |
1524 | link_tbl_len += assoc_nents + 1 + src_nents; | 1364 | sec4_sg_len += assoc_nents + 1 + src_nents; |
1525 | if (likely(req->src == req->dst)) | 1365 | if (likely(req->src == req->dst)) |
1526 | contig &= ~GIV_DST_CONTIG; | 1366 | contig &= ~GIV_DST_CONTIG; |
1527 | } | 1367 | } |
1528 | link_tbl_len += dst_nents; | 1368 | sec4_sg_len += dst_nents; |
1529 | 1369 | ||
1530 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | 1370 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
1531 | 1371 | ||
1532 | /* allocate space for base edesc and hw desc commands, link tables */ | 1372 | /* allocate space for base edesc and hw desc commands, link tables */ |
1533 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | 1373 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + |
1534 | link_tbl_bytes, GFP_DMA | flags); | 1374 | sec4_sg_bytes, GFP_DMA | flags); |
1535 | if (!edesc) { | 1375 | if (!edesc) { |
1536 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1376 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
1537 | return ERR_PTR(-ENOMEM); | 1377 | return ERR_PTR(-ENOMEM); |
1538 | } | 1378 | } |
1539 | 1379 | ||
1540 | edesc->assoc_nents = assoc_nents; | 1380 | edesc->assoc_nents = assoc_nents; |
1381 | edesc->assoc_chained = assoc_chained; | ||
1541 | edesc->src_nents = src_nents; | 1382 | edesc->src_nents = src_nents; |
1383 | edesc->src_chained = src_chained; | ||
1542 | edesc->dst_nents = dst_nents; | 1384 | edesc->dst_nents = dst_nents; |
1385 | edesc->dst_chained = dst_chained; | ||
1543 | edesc->iv_dma = iv_dma; | 1386 | edesc->iv_dma = iv_dma; |
1544 | edesc->link_tbl_bytes = link_tbl_bytes; | 1387 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1545 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | 1388 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + |
1546 | desc_bytes; | 1389 | desc_bytes; |
1547 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | 1390 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
1548 | link_tbl_bytes, DMA_TO_DEVICE); | 1391 | sec4_sg_bytes, DMA_TO_DEVICE); |
1549 | *contig_ptr = contig; | 1392 | *contig_ptr = contig; |
1550 | 1393 | ||
1551 | link_tbl_index = 0; | 1394 | sec4_sg_index = 0; |
1552 | if (!(contig & GIV_SRC_CONTIG)) { | 1395 | if (!(contig & GIV_SRC_CONTIG)) { |
1553 | sg_to_link_tbl(req->assoc, assoc_nents, | 1396 | sg_to_sec4_sg(req->assoc, assoc_nents, |
1554 | edesc->link_tbl + | 1397 | edesc->sec4_sg + |
1555 | link_tbl_index, 0); | 1398 | sec4_sg_index, 0); |
1556 | link_tbl_index += assoc_nents; | 1399 | sec4_sg_index += assoc_nents; |
1557 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | 1400 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, |
1558 | iv_dma, ivsize, 0); | 1401 | iv_dma, ivsize, 0); |
1559 | link_tbl_index += 1; | 1402 | sec4_sg_index += 1; |
1560 | sg_to_link_tbl_last(req->src, src_nents, | 1403 | sg_to_sec4_sg_last(req->src, src_nents, |
1561 | edesc->link_tbl + | 1404 | edesc->sec4_sg + |
1562 | link_tbl_index, 0); | 1405 | sec4_sg_index, 0); |
1563 | link_tbl_index += src_nents; | 1406 | sec4_sg_index += src_nents; |
1564 | } | 1407 | } |
1565 | if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { | 1408 | if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { |
1566 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | 1409 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, |
1567 | iv_dma, ivsize, 0); | 1410 | iv_dma, ivsize, 0); |
1568 | link_tbl_index += 1; | 1411 | sec4_sg_index += 1; |
1569 | sg_to_link_tbl_last(req->dst, dst_nents, | 1412 | sg_to_sec4_sg_last(req->dst, dst_nents, |
1570 | edesc->link_tbl + link_tbl_index, 0); | 1413 | edesc->sec4_sg + sec4_sg_index, 0); |
1571 | } | 1414 | } |
1572 | 1415 | ||
1573 | return edesc; | 1416 | return edesc; |
@@ -1633,27 +1476,28 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
1633 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1476 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
1634 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? | 1477 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? |
1635 | GFP_KERNEL : GFP_ATOMIC; | 1478 | GFP_KERNEL : GFP_ATOMIC; |
1636 | int src_nents, dst_nents = 0, link_tbl_bytes; | 1479 | int src_nents, dst_nents = 0, sec4_sg_bytes; |
1637 | struct ablkcipher_edesc *edesc; | 1480 | struct ablkcipher_edesc *edesc; |
1638 | dma_addr_t iv_dma = 0; | 1481 | dma_addr_t iv_dma = 0; |
1639 | bool iv_contig = false; | 1482 | bool iv_contig = false; |
1640 | int sgc; | 1483 | int sgc; |
1641 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | 1484 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); |
1642 | int link_tbl_index; | 1485 | bool src_chained = false, dst_chained = false; |
1486 | int sec4_sg_index; | ||
1643 | 1487 | ||
1644 | src_nents = sg_count(req->src, req->nbytes); | 1488 | src_nents = sg_count(req->src, req->nbytes, &src_chained); |
1645 | 1489 | ||
1646 | if (unlikely(req->dst != req->src)) | 1490 | if (req->dst != req->src) |
1647 | dst_nents = sg_count(req->dst, req->nbytes); | 1491 | dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); |
1648 | 1492 | ||
1649 | if (likely(req->src == req->dst)) { | 1493 | if (likely(req->src == req->dst)) { |
1650 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1494 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
1651 | DMA_BIDIRECTIONAL); | 1495 | DMA_BIDIRECTIONAL, src_chained); |
1652 | } else { | 1496 | } else { |
1653 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1497 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
1654 | DMA_TO_DEVICE); | 1498 | DMA_TO_DEVICE, src_chained); |
1655 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | 1499 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, |
1656 | DMA_FROM_DEVICE); | 1500 | DMA_FROM_DEVICE, dst_chained); |
1657 | } | 1501 | } |
1658 | 1502 | ||
1659 | /* | 1503 | /* |
@@ -1665,44 +1509,46 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
1665 | iv_contig = true; | 1509 | iv_contig = true; |
1666 | else | 1510 | else |
1667 | src_nents = src_nents ? : 1; | 1511 | src_nents = src_nents ? : 1; |
1668 | link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * | 1512 | sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * |
1669 | sizeof(struct link_tbl_entry); | 1513 | sizeof(struct sec4_sg_entry); |
1670 | 1514 | ||
1671 | /* allocate space for base edesc and hw desc commands, link tables */ | 1515 | /* allocate space for base edesc and hw desc commands, link tables */ |
1672 | edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + | 1516 | edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + |
1673 | link_tbl_bytes, GFP_DMA | flags); | 1517 | sec4_sg_bytes, GFP_DMA | flags); |
1674 | if (!edesc) { | 1518 | if (!edesc) { |
1675 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1519 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
1676 | return ERR_PTR(-ENOMEM); | 1520 | return ERR_PTR(-ENOMEM); |
1677 | } | 1521 | } |
1678 | 1522 | ||
1679 | edesc->src_nents = src_nents; | 1523 | edesc->src_nents = src_nents; |
1524 | edesc->src_chained = src_chained; | ||
1680 | edesc->dst_nents = dst_nents; | 1525 | edesc->dst_nents = dst_nents; |
1681 | edesc->link_tbl_bytes = link_tbl_bytes; | 1526 | edesc->dst_chained = dst_chained; |
1682 | edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + | 1527 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1683 | desc_bytes; | 1528 | edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + |
1529 | desc_bytes; | ||
1684 | 1530 | ||
1685 | link_tbl_index = 0; | 1531 | sec4_sg_index = 0; |
1686 | if (!iv_contig) { | 1532 | if (!iv_contig) { |
1687 | sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); | 1533 | dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); |
1688 | sg_to_link_tbl_last(req->src, src_nents, | 1534 | sg_to_sec4_sg_last(req->src, src_nents, |
1689 | edesc->link_tbl + 1, 0); | 1535 | edesc->sec4_sg + 1, 0); |
1690 | link_tbl_index += 1 + src_nents; | 1536 | sec4_sg_index += 1 + src_nents; |
1691 | } | 1537 | } |
1692 | 1538 | ||
1693 | if (unlikely(dst_nents)) { | 1539 | if (dst_nents) { |
1694 | sg_to_link_tbl_last(req->dst, dst_nents, | 1540 | sg_to_sec4_sg_last(req->dst, dst_nents, |
1695 | edesc->link_tbl + link_tbl_index, 0); | 1541 | edesc->sec4_sg + sec4_sg_index, 0); |
1696 | } | 1542 | } |
1697 | 1543 | ||
1698 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | 1544 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
1699 | link_tbl_bytes, DMA_TO_DEVICE); | 1545 | sec4_sg_bytes, DMA_TO_DEVICE); |
1700 | edesc->iv_dma = iv_dma; | 1546 | edesc->iv_dma = iv_dma; |
1701 | 1547 | ||
1702 | #ifdef DEBUG | 1548 | #ifdef DEBUG |
1703 | print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", | 1549 | print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ", |
1704 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, | 1550 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, |
1705 | link_tbl_bytes, 1); | 1551 | sec4_sg_bytes, 1); |
1706 | #endif | 1552 | #endif |
1707 | 1553 | ||
1708 | *iv_contig_out = iv_contig; | 1554 | *iv_contig_out = iv_contig; |
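	/*
	 * Resulting sec4_sg layout here, per the code above:
	 *	[ iv (if not contiguous) | src entries | dst entries ]
	 */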
@@ -2227,7 +2073,7 @@ static int caam_cra_init(struct crypto_tfm *tfm) | |||
2227 | * distribute tfms across job rings to ensure in-order | 2073 | * distribute tfms across job rings to ensure in-order |
2228 | * crypto request processing per tfm | 2074 | * crypto request processing per tfm |
2229 | */ | 2075 | */ |
2230 | ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi]; | 2076 | ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; |
2231 | 2077 | ||
2232 | /* copy descriptor header template value */ | 2078 | /* copy descriptor header template value */ |
2233 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; | 2079 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; |
@@ -2264,7 +2110,6 @@ static void __exit caam_algapi_exit(void) | |||
2264 | struct device *ctrldev; | 2110 | struct device *ctrldev; |
2265 | struct caam_drv_private *priv; | 2111 | struct caam_drv_private *priv; |
2266 | struct caam_crypto_alg *t_alg, *n; | 2112 | struct caam_crypto_alg *t_alg, *n; |
2267 | int i, err; | ||
2268 | 2113 | ||
2269 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 2114 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
2270 | if (!dev_node) { | 2115 | if (!dev_node) { |
@@ -2289,13 +2134,6 @@ static void __exit caam_algapi_exit(void) | |||
2289 | list_del(&t_alg->entry); | 2134 | list_del(&t_alg->entry); |
2290 | kfree(t_alg); | 2135 | kfree(t_alg); |
2291 | } | 2136 | } |
2292 | |||
2293 | for (i = 0; i < priv->total_jobrs; i++) { | ||
2294 | err = caam_jr_deregister(priv->algapi_jr[i]); | ||
2295 | if (err < 0) | ||
2296 | break; | ||
2297 | } | ||
2298 | kfree(priv->algapi_jr); | ||
2299 | } | 2137 | } |
2300 | 2138 | ||
2301 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | 2139 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, |
@@ -2348,7 +2186,7 @@ static int __init caam_algapi_init(void) | |||
2348 | { | 2186 | { |
2349 | struct device_node *dev_node; | 2187 | struct device_node *dev_node; |
2350 | struct platform_device *pdev; | 2188 | struct platform_device *pdev; |
2351 | struct device *ctrldev, **jrdev; | 2189 | struct device *ctrldev; |
2352 | struct caam_drv_private *priv; | 2190 | struct caam_drv_private *priv; |
2353 | int i = 0, err = 0; | 2191 | int i = 0, err = 0; |
2354 | 2192 | ||
@@ -2369,24 +2207,6 @@ static int __init caam_algapi_init(void) | |||
2369 | 2207 | ||
2370 | INIT_LIST_HEAD(&priv->alg_list); | 2208 | INIT_LIST_HEAD(&priv->alg_list); |
2371 | 2209 | ||
2372 | jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL); | ||
2373 | if (!jrdev) | ||
2374 | return -ENOMEM; | ||
2375 | |||
2376 | for (i = 0; i < priv->total_jobrs; i++) { | ||
2377 | err = caam_jr_register(ctrldev, &jrdev[i]); | ||
2378 | if (err < 0) | ||
2379 | break; | ||
2380 | } | ||
2381 | if (err < 0 && i == 0) { | ||
2382 | dev_err(ctrldev, "algapi error in job ring registration: %d\n", | ||
2383 | err); | ||
2384 | kfree(jrdev); | ||
2385 | return err; | ||
2386 | } | ||
2387 | |||
2388 | priv->num_jrs_for_algapi = i; | ||
2389 | priv->algapi_jr = jrdev; | ||
2390 | atomic_set(&priv->tfm_count, -1); | 2210 | atomic_set(&priv->tfm_count, -1); |
2391 | 2211 | ||
2392 | /* register crypto algorithms the device supports */ | 2212 | /* register crypto algorithms the device supports */ |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c new file mode 100644 index 000000000000..895aaf2bca92 --- /dev/null +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -0,0 +1,1878 @@ | |||
1 | /* | ||
2 | * caam - Freescale FSL CAAM support for ahash functions of crypto API | ||
3 | * | ||
4 | * Copyright 2011 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | * Based on caamalg.c crypto API driver. | ||
7 | * | ||
8 | * relationship of digest job descriptor or first job descriptor after init to | ||
9 | * shared descriptors: | ||
10 | * | ||
11 | * --------------- --------------- | ||
12 | * | JobDesc #1 |-------------------->| ShareDesc | | ||
13 | * | *(packet 1) | | (hashKey) | | ||
14 | * --------------- | (operation) | | ||
15 | * --------------- | ||
16 | * | ||
17 | * relationship of subsequent job descriptors to shared descriptors: | ||
18 | * | ||
19 | * --------------- --------------- | ||
20 | * | JobDesc #2 |-------------------->| ShareDesc | | ||
21 | * | *(packet 2) | |------------->| (hashKey) | | ||
22 | * --------------- | |-------->| (operation) | | ||
23 | * . | | | (load ctx2) | | ||
24 | * . | | --------------- | ||
25 | * --------------- | | | ||
26 | * | JobDesc #3 |------| | | ||
27 | * | *(packet 3) | | | ||
28 | * --------------- | | ||
29 | * . | | ||
30 | * . | | ||
31 | * --------------- | | ||
32 | * | JobDesc #4 |------------ | ||
33 | * | *(packet 4) | | ||
34 | * --------------- | ||
35 | * | ||
36 | * The SharedDesc never changes for a connection unless rekeyed, but | ||
37 | * each packet will likely be in a different place. So all we need | ||
38 | * to know to process the packet is where the input is, where the | ||
39 | * output goes, and what context we want to process with. Context is | ||
40 | * in the SharedDesc, packet references in the JobDesc. | ||
41 | * | ||
42 | * So, a job desc looks like: | ||
43 | * | ||
44 | * --------------------- | ||
45 | * | Header | | ||
46 | * | ShareDesc Pointer | | ||
47 | * | SEQ_OUT_PTR | | ||
48 | * | (output buffer) | | ||
49 | * | (output length) | | ||
50 | * | SEQ_IN_PTR | | ||
51 | * | (input buffer) | | ||
52 | * | (input length) | | ||
53 | * --------------------- | ||
54 | */ | ||
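/*
 * A sketch of how such a job descriptor is assembled with the
 * desc_constr.h helpers; init_job_desc_shared() and the HDR_* flags are
 * assumptions for illustration (the shared-pointer variant of the
 * init_job_desc() used further below):
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *	append_seq_in_ptr(desc, src_dma, in_len, 0);
 */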
55 | |||
56 | #include "compat.h" | ||
57 | |||
58 | #include "regs.h" | ||
59 | #include "intern.h" | ||
60 | #include "desc_constr.h" | ||
61 | #include "jr.h" | ||
62 | #include "error.h" | ||
63 | #include "sg_sw_sec4.h" | ||
64 | #include "key_gen.h" | ||
65 | |||
66 | #define CAAM_CRA_PRIORITY 3000 | ||
67 | |||
68 | /* max hash key is max split key size */ | ||
69 | #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) | ||
70 | |||
71 | #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE | ||
72 | #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE | ||
73 | |||
74 | /* length of descriptors text */ | ||
75 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) | ||
76 | |||
77 | #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) | ||
78 | #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) | ||
79 | #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) | ||
80 | #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) | ||
81 | #define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) | ||
82 | #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) | ||
83 | |||
84 | #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ | ||
85 | CAAM_MAX_HASH_KEY_SIZE) | ||
86 | #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) | ||
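/*
 * Worked example of the arithmetic above, assuming CAAM_CMD_SZ == 4 and
 * a 32-bit CAAM_PTR_SZ: DESC_JOB_IO_LEN = 5 * 4 + 3 * 4 = 32 bytes, and
 * DESC_AHASH_FINAL_LEN = (4 + 5) * 4 = 36 bytes of descriptor text.
 */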
87 | |||
88 | /* caam context sizes for hashes: running digest + 8 */ | ||
89 | #define HASH_MSG_LEN 8 | ||
90 | #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) | ||
91 | |||
92 | #ifdef DEBUG | ||
93 | /* for print_hex_dumps with line references */ | ||
94 | #define xstr(s) str(s) | ||
95 | #define str(s) #s | ||
96 | #define debug(format, arg...) printk(format, arg) | ||
97 | #else | ||
98 | #define debug(format, arg...) | ||
99 | #endif | ||
100 | |||
101 | /* ahash per-session context */ | ||
102 | struct caam_hash_ctx { | ||
103 | struct device *jrdev; | ||
104 | u32 sh_desc_update[DESC_HASH_MAX_USED_LEN]; | ||
105 | u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN]; | ||
106 | u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN]; | ||
107 | u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN]; | ||
108 | u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN]; | ||
109 | dma_addr_t sh_desc_update_dma; | ||
110 | dma_addr_t sh_desc_update_first_dma; | ||
111 | dma_addr_t sh_desc_fin_dma; | ||
112 | dma_addr_t sh_desc_digest_dma; | ||
113 | dma_addr_t sh_desc_finup_dma; | ||
114 | u32 alg_type; | ||
115 | u32 alg_op; | ||
116 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; | ||
117 | dma_addr_t key_dma; | ||
118 | int ctx_len; | ||
119 | unsigned int split_key_len; | ||
120 | unsigned int split_key_pad_len; | ||
121 | }; | ||
122 | |||
123 | /* ahash state */ | ||
124 | struct caam_hash_state { | ||
125 | dma_addr_t buf_dma; | ||
126 | dma_addr_t ctx_dma; | ||
127 | u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; | ||
128 | int buflen_0; | ||
129 | u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; | ||
130 | int buflen_1; | ||
131 | u8 caam_ctx[MAX_CTX_LEN]; | ||
132 | int (*update)(struct ahash_request *req); | ||
133 | int (*final)(struct ahash_request *req); | ||
134 | int (*finup)(struct ahash_request *req); | ||
135 | int current_buf; | ||
136 | }; | ||
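/*
 * buf_0/buf_1 are used alternately across requests, selected by
 * current_buf; a sketch with a hypothetical accessor (the driver
 * open-codes the equivalent):
 *
 *	static inline u8 *cur_buf(struct caam_hash_state *state)
 *	{
 *		return state->current_buf ? state->buf_1 : state->buf_0;
 *	}
 */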
137 | |||
138 | /* Common job descriptor seq in/out ptr routines */ | ||
139 | |||
140 | /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ | ||
141 | static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, | ||
142 | struct caam_hash_state *state, | ||
143 | int ctx_len) | ||
144 | { | ||
145 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, | ||
146 | ctx_len, DMA_FROM_DEVICE); | ||
147 | append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); | ||
148 | } | ||
149 | |||
150 | /* Map req->result, and append seq_out_ptr command that points to it */ | ||
151 | static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, | ||
152 | u8 *result, int digestsize) | ||
153 | { | ||
154 | dma_addr_t dst_dma; | ||
155 | |||
156 | dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); | ||
157 | append_seq_out_ptr(desc, dst_dma, digestsize, 0); | ||
158 | |||
159 | return dst_dma; | ||
160 | } | ||
161 | |||
162 | /* Map current buffer in state and put it in link table */ | ||
163 | static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, | ||
164 | struct sec4_sg_entry *sec4_sg, | ||
165 | u8 *buf, int buflen) | ||
166 | { | ||
167 | dma_addr_t buf_dma; | ||
168 | |||
169 | buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | ||
170 | dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0); | ||
171 | |||
172 | return buf_dma; | ||
173 | } | ||
174 | |||
175 | /* Map req->src and put it in link table */ | ||
176 | static inline void src_map_to_sec4_sg(struct device *jrdev, | ||
177 | struct scatterlist *src, int src_nents, | ||
178 | struct sec4_sg_entry *sec4_sg, | ||
179 | bool chained) | ||
180 | { | ||
181 | dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained); | ||
182 | sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0); | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * Only put the buffer in the link table if it contains data; a | ||
187 | * previously used buffer, if any, still needs to be unmapped. | ||
188 | */ | ||
189 | static inline dma_addr_t | ||
190 | try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, | ||
191 | u8 *buf, dma_addr_t buf_dma, int buflen, | ||
192 | int last_buflen) | ||
193 | { | ||
194 | if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) | ||
195 | dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); | ||
196 | if (buflen) | ||
197 | buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); | ||
198 | else | ||
199 | buf_dma = 0; | ||
200 | |||
201 | return buf_dma; | ||
202 | } | ||
203 | |||
204 | /* Map state->caam_ctx, and add it to link table */ | ||
205 | static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, | ||
206 | struct caam_hash_state *state, | ||
207 | int ctx_len, | ||
208 | struct sec4_sg_entry *sec4_sg, | ||
209 | u32 flag) | ||
210 | { | ||
211 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); | ||
212 | dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); | ||
213 | } | ||
214 | |||
215 | /* Common shared descriptor commands */ | ||
216 | static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) | ||
217 | { | ||
218 | append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, | ||
219 | ctx->split_key_len, CLASS_2 | | ||
220 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
221 | } | ||
222 | |||
223 | /* Append key if it has been set */ | ||
224 | static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) | ||
225 | { | ||
226 | u32 *key_jump_cmd; | ||
227 | |||
228 | init_sh_desc(desc, HDR_SHARE_WAIT); | ||
229 | |||
230 | if (ctx->split_key_len) { | ||
231 | /* Skip if already shared */ | ||
232 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
233 | JUMP_COND_SHRD); | ||
234 | |||
235 | append_key_ahash(desc, ctx); | ||
236 | |||
237 | set_jump_tgt_here(desc, key_jump_cmd); | ||
238 | } | ||
239 | |||
240 | /* Propagate errors from shared to job descriptor */ | ||
241 | append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * For ahash, read data from seqin following state->caam_ctx, and | ||
246 | * write the resulting class2 context to seqout, which may be | ||
247 | * state->caam_ctx or req->result | ||
248 | */ | ||
249 | static inline void ahash_append_load_str(u32 *desc, int digestsize) | ||
250 | { | ||
251 | /* Calculate remaining bytes to read */ | ||
252 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
253 | |||
254 | /* Read remaining bytes */ | ||
255 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | | ||
256 | FIFOLD_TYPE_MSG | KEY_VLF); | ||
257 | |||
258 | /* Store class2 context bytes */ | ||
259 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | | ||
260 | LDST_SRCDST_BYTE_CONTEXT); | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * For ahash update, final and finup, import context, read and write to seqout | ||
265 | */ | ||
266 | static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, | ||
267 | int digestsize, | ||
268 | struct caam_hash_ctx *ctx) | ||
269 | { | ||
270 | init_sh_desc_key_ahash(desc, ctx); | ||
271 | |||
272 | /* Import context from software */ | ||
273 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
274 | LDST_CLASS_2_CCB | ctx->ctx_len); | ||
275 | |||
276 | /* Class 2 operation */ | ||
277 | append_operation(desc, op | state | OP_ALG_ENCRYPT); | ||
278 | |||
279 | /* | ||
280 | * Load from buf and/or src and write to req->result or state->context | ||
281 | */ | ||
282 | ahash_append_load_str(desc, digestsize); | ||
283 | } | ||
284 | |||
285 | /* For ahash update_first and digest, read and write to seqout */ | ||
286 | static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, | ||
287 | int digestsize, struct caam_hash_ctx *ctx) | ||
288 | { | ||
289 | init_sh_desc_key_ahash(desc, ctx); | ||
290 | |||
291 | /* Class 2 operation */ | ||
292 | append_operation(desc, op | state | OP_ALG_ENCRYPT); | ||
293 | |||
294 | /* | ||
295 | * Load from buf and/or src and write to req->result or state->context | ||
296 | */ | ||
297 | ahash_append_load_str(desc, digestsize); | ||
298 | } | ||
299 | |||
300 | static int ahash_set_sh_desc(struct crypto_ahash *ahash) | ||
301 | { | ||
302 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
303 | int digestsize = crypto_ahash_digestsize(ahash); | ||
304 | struct device *jrdev = ctx->jrdev; | ||
305 | u32 have_key = 0; | ||
306 | u32 *desc; | ||
307 | |||
308 | if (ctx->split_key_len) | ||
309 | have_key = OP_ALG_AAI_HMAC_PRECOMP; | ||
310 | |||
311 | /* ahash_update shared descriptor */ | ||
312 | desc = ctx->sh_desc_update; | ||
313 | |||
314 | init_sh_desc(desc, HDR_SHARE_WAIT); | ||
315 | |||
316 | /* Import context from software */ | ||
317 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
318 | LDST_CLASS_2_CCB | ctx->ctx_len); | ||
319 | |||
320 | /* Class 2 operation */ | ||
321 | append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | | ||
322 | OP_ALG_ENCRYPT); | ||
323 | |||
324 | /* Load data and write to result or context */ | ||
325 | ahash_append_load_str(desc, ctx->ctx_len); | ||
326 | |||
327 | ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
328 | DMA_TO_DEVICE); | ||
329 | if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { | ||
330 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
331 | return -ENOMEM; | ||
332 | } | ||
333 | #ifdef DEBUG | ||
334 | print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ", | ||
335 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
336 | #endif | ||
337 | |||
338 | /* ahash_update_first shared descriptor */ | ||
339 | desc = ctx->sh_desc_update_first; | ||
340 | |||
341 | ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, | ||
342 | ctx->ctx_len, ctx); | ||
343 | |||
344 | ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, | ||
345 | desc_bytes(desc), | ||
346 | DMA_TO_DEVICE); | ||
347 | if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { | ||
348 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
349 | return -ENOMEM; | ||
350 | } | ||
351 | #ifdef DEBUG | ||
352 | print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ", | ||
353 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
354 | #endif | ||
355 | |||
356 | /* ahash_final shared descriptor */ | ||
357 | desc = ctx->sh_desc_fin; | ||
358 | |||
359 | ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, | ||
360 | OP_ALG_AS_FINALIZE, digestsize, ctx); | ||
361 | |||
362 | ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
363 | DMA_TO_DEVICE); | ||
364 | if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { | ||
365 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
366 | return -ENOMEM; | ||
367 | } | ||
368 | #ifdef DEBUG | ||
369 | print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ", | ||
370 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
371 | desc_bytes(desc), 1); | ||
372 | #endif | ||
373 | |||
374 | /* ahash_finup shared descriptor */ | ||
375 | desc = ctx->sh_desc_finup; | ||
376 | |||
377 | ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, | ||
378 | OP_ALG_AS_FINALIZE, digestsize, ctx); | ||
379 | |||
380 | ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
381 | DMA_TO_DEVICE); | ||
382 | if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { | ||
383 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
384 | return -ENOMEM; | ||
385 | } | ||
386 | #ifdef DEBUG | ||
387 | print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ", | ||
388 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
389 | desc_bytes(desc), 1); | ||
390 | #endif | ||
391 | |||
392 | /* ahash_digest shared descriptor */ | ||
393 | desc = ctx->sh_desc_digest; | ||
394 | |||
395 | ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, | ||
396 | digestsize, ctx); | ||
397 | |||
398 | ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, | ||
399 | desc_bytes(desc), | ||
400 | DMA_TO_DEVICE); | ||
401 | if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { | ||
402 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
403 | return -ENOMEM; | ||
404 | } | ||
405 | #ifdef DEBUG | ||
406 | print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ", | ||
407 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
408 | desc_bytes(desc), 1); | ||
409 | #endif | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, | ||
415 | u32 keylen) | ||
416 | { | ||
417 | return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, | ||
418 | ctx->split_key_pad_len, key_in, keylen, | ||
419 | ctx->alg_op); | ||
420 | } | ||
421 | |||
422 | /* Digest the key down to digestsize if it is too long */ | ||
423 | static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | ||
424 | u32 *keylen, u8 *key_out, u32 digestsize) | ||
425 | { | ||
426 | struct device *jrdev = ctx->jrdev; | ||
427 | u32 *desc; | ||
428 | struct split_key_result result; | ||
429 | dma_addr_t src_dma, dst_dma; | ||
430 | int ret = 0; | ||
431 | |||
432 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | ||
433 | if (!desc) | ||
434 | return -ENOMEM; | ||
435 | |||
436 | init_job_desc(desc, 0); | ||
435 | |||
436 | src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, | ||
437 | DMA_TO_DEVICE); | ||
438 | if (dma_mapping_error(jrdev, src_dma)) { | ||
439 | dev_err(jrdev, "unable to map key input memory\n"); | ||
440 | kfree(desc); | ||
441 | return -ENOMEM; | ||
442 | } | ||
443 | dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, | ||
444 | DMA_FROM_DEVICE); | ||
445 | if (dma_mapping_error(jrdev, dst_dma)) { | ||
446 | dev_err(jrdev, "unable to map key output memory\n"); | ||
447 | dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); | ||
448 | kfree(desc); | ||
449 | return -ENOMEM; | ||
450 | } | ||
451 | |||
452 | /* Job descriptor to perform unkeyed hash on key_in */ | ||
453 | append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | | ||
454 | OP_ALG_AS_INITFINAL); | ||
455 | append_seq_in_ptr(desc, src_dma, *keylen, 0); | ||
456 | append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | | ||
457 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); | ||
458 | append_seq_out_ptr(desc, dst_dma, digestsize, 0); | ||
459 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | | ||
460 | LDST_SRCDST_BYTE_CONTEXT); | ||
461 | |||
462 | #ifdef DEBUG | ||
463 | print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ", | ||
464 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); | ||
465 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
466 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
467 | #endif | ||
468 | |||
469 | result.err = 0; | ||
470 | init_completion(&result.completion); | ||
471 | |||
472 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | ||
473 | if (!ret) { | ||
474 | /* in progress */ | ||
475 | wait_for_completion_interruptible(&result.completion); | ||
476 | ret = result.err; | ||
477 | #ifdef DEBUG | ||
478 | print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ", | ||
479 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, | ||
480 | digestsize, 1); | ||
481 | #endif | ||
482 | } | ||
483 | dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); | ||
484 | dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); | ||
485 | |||
486 | *keylen = digestsize; | ||
487 | |||
488 | kfree(desc); | ||
489 | |||
490 | return ret; | ||
491 | } | ||
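
hash_digest_key() above implements the usual HMAC convention (RFC 2104): a key longer than the hash block size is first digested, and the digest is used in its place. A minimal host-side sketch of the same rule, assuming a hypothetical one-shot hash() helper (not a CAAM or kernel API):

#include <string.h>

/* hash() is a hypothetical one-shot digest helper, assumed to write
 * 'digestsize' bytes to its output buffer.
 */
extern void hash(const unsigned char *in, size_t len, unsigned char *out);

static void prepare_hmac_key(const unsigned char *key, size_t keylen,
			     size_t blocksize, size_t digestsize,
			     unsigned char *key_out, size_t *outlen)
{
	if (keylen > blocksize) {
		hash(key, keylen, key_out);	/* K' = H(K) */
		*outlen = digestsize;
	} else {
		memcpy(key_out, key, keylen);	/* K' = K */
		*outlen = keylen;
	}
}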
492 | |||
493 | static int ahash_setkey(struct crypto_ahash *ahash, | ||
494 | const u8 *key, unsigned int keylen) | ||
495 | { | ||
496 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | ||
497 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | ||
498 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
499 | struct device *jrdev = ctx->jrdev; | ||
500 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); | ||
501 | int digestsize = crypto_ahash_digestsize(ahash); | ||
502 | int ret = 0; | ||
503 | u8 *hashed_key = NULL; | ||
504 | |||
505 | #ifdef DEBUG | ||
506 | printk(KERN_ERR "keylen %d\n", keylen); | ||
507 | #endif | ||
508 | |||
509 | if (keylen > blocksize) { | ||
510 | hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL | | ||
511 | GFP_DMA); | ||
512 | if (!hashed_key) | ||
513 | return -ENOMEM; | ||
514 | ret = hash_digest_key(ctx, key, &keylen, hashed_key, | ||
515 | digestsize); | ||
516 | if (ret) | ||
517 | goto badkey; | ||
518 | key = hashed_key; | ||
519 | } | ||
520 | |||
521 | /* Pick class 2 key length from algorithm submask */ | ||
522 | ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | ||
523 | OP_ALG_ALGSEL_SHIFT] * 2; | ||
524 | ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); | ||
525 | |||
526 | #ifdef DEBUG | ||
527 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | ||
528 | ctx->split_key_len, ctx->split_key_pad_len); | ||
529 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | ||
530 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
531 | #endif | ||
532 | |||
533 | ret = gen_split_hash_key(ctx, key, keylen); | ||
534 | if (ret) | ||
535 | goto badkey; | ||
536 | |||
537 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | ||
538 | DMA_TO_DEVICE); | ||
539 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | ||
540 | dev_err(jrdev, "unable to map key i/o memory\n"); | ||
541 | kfree(hashed_key); | ||
      return -ENOMEM; | ||
542 | } | ||
543 | #ifdef DEBUG | ||
544 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
545 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | ||
546 | ctx->split_key_pad_len, 1); | ||
547 | #endif | ||
548 | |||
549 | ret = ahash_set_sh_desc(ahash); | ||
550 | if (ret) { | ||
551 | dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, | ||
552 | DMA_TO_DEVICE); | ||
553 | } | ||
554 | |||
555 | kfree(hashed_key); | ||
556 | return ret; | ||
557 | badkey: | ||
558 | kfree(hashed_key); | ||
559 | crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
560 | return -EINVAL; | ||
561 | } | ||
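
The split-key sizing in ahash_setkey() is a table lookup: the MDHA split key holds two pad-derived hash states (hence mdpadlen[] * 2), rounded up to a 16-byte boundary. A standalone sketch that prints the resulting sizes; the values come from the mdpadlen[] table above, while the IPAD/OPAD-state interpretation is our reading, not stated in this file:

#include <stdio.h>

#define ALIGN16(x) (((x) + 15) & ~15)

int main(void)
{
	/* MDHA pad sizes: MD5, SHA1, SHA224, SHA256, SHA384, SHA512 */
	static const unsigned int mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	static const char *names[] = { "md5", "sha1", "sha224", "sha256",
				       "sha384", "sha512" };
	int i;

	for (i = 0; i < 6; i++)
		printf("%-7s split_key_len=%3u split_key_pad_len=%3u\n",
		       names[i], mdpadlen[i] * 2, ALIGN16(mdpadlen[i] * 2));
	return 0;
}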
562 | |||
563 | /* | ||
564 | * ahash_edesc - s/w-extended ahash descriptor | ||
565 | * @dst_dma: physical mapped address of req->result | ||
566 | * @sec4_sg_dma: physical mapped address of h/w link table | ||
567 | * @chained: if source is chained | ||
568 | * @src_nents: number of segments in input scatterlist | ||
569 | * @sec4_sg_bytes: length of dma mapped sec4_sg space | ||
570 | * @sec4_sg: pointer to h/w link table | ||
571 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | ||
572 | */ | ||
573 | struct ahash_edesc { | ||
574 | dma_addr_t dst_dma; | ||
575 | dma_addr_t sec4_sg_dma; | ||
576 | bool chained; | ||
577 | int src_nents; | ||
578 | int sec4_sg_bytes; | ||
579 | struct sec4_sg_entry *sec4_sg; | ||
580 | u32 hw_desc[0]; | ||
581 | }; | ||
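
All of the handlers below carve their link table out of the same kmalloc() that holds the edesc and the job descriptor, so one allocation (and one kfree() in the completion callback) covers everything. The layout, reconstructed from the allocation sizes and pointer arithmetic used throughout this file:

/*
 *   +---------------------------+  <- edesc (one kmalloc)
 *   | struct ahash_edesc        |
 *   +---------------------------+  <- edesc->hw_desc[] starts here
 *   | job descriptor area,      |
 *   | DESC_JOB_IO_LEN bytes     |
 *   +---------------------------+  <- edesc->sec4_sg
 *   | sec4_sg_bytes of link-    |
 *   | table (sec4_sg) entries   |
 *   +---------------------------+
 */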
582 | |||
583 | static inline void ahash_unmap(struct device *dev, | ||
584 | struct ahash_edesc *edesc, | ||
585 | struct ahash_request *req, int dst_len) | ||
586 | { | ||
587 | if (edesc->src_nents) | ||
588 | dma_unmap_sg_chained(dev, req->src, edesc->src_nents, | ||
589 | DMA_TO_DEVICE, edesc->chained); | ||
590 | if (edesc->dst_dma) | ||
591 | dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); | ||
592 | |||
593 | if (edesc->sec4_sg_bytes) | ||
594 | dma_unmap_single(dev, edesc->sec4_sg_dma, | ||
595 | edesc->sec4_sg_bytes, DMA_TO_DEVICE); | ||
596 | } | ||
597 | |||
598 | static inline void ahash_unmap_ctx(struct device *dev, | ||
599 | struct ahash_edesc *edesc, | ||
600 | struct ahash_request *req, int dst_len, u32 flag) | ||
601 | { | ||
602 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
603 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
604 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
605 | |||
606 | if (state->ctx_dma) | ||
607 | dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); | ||
608 | ahash_unmap(dev, edesc, req, dst_len); | ||
609 | } | ||
610 | |||
611 | static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | ||
612 | void *context) | ||
613 | { | ||
614 | struct ahash_request *req = context; | ||
615 | struct ahash_edesc *edesc; | ||
616 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
617 | int digestsize = crypto_ahash_digestsize(ahash); | ||
618 | #ifdef DEBUG | ||
619 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
620 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
621 | |||
622 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
623 | #endif | ||
624 | |||
625 | edesc = (struct ahash_edesc *)((char *)desc - | ||
626 | offsetof(struct ahash_edesc, hw_desc)); | ||
627 | if (err) { | ||
628 | char tmp[CAAM_ERROR_STR_MAX]; | ||
629 | |||
630 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
631 | } | ||
632 | |||
633 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
634 | kfree(edesc); | ||
635 | |||
636 | #ifdef DEBUG | ||
637 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | ||
638 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | ||
639 | ctx->ctx_len, 1); | ||
640 | if (req->result) | ||
641 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | ||
642 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | ||
643 | digestsize, 1); | ||
644 | #endif | ||
645 | |||
646 | req->base.complete(&req->base, err); | ||
647 | } | ||
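
The (char *)desc - offsetof(...) arithmetic in these completion callbacks is an open-coded container_of(): from the address of the hw_desc member, recover the enclosing ahash_edesc. A self-contained user-space demo of the same trick:

#include <stddef.h>
#include <stdio.h>

struct edesc {
	int src_nents;
	unsigned int hw_desc[4];
};

int main(void)
{
	struct edesc e = { .src_nents = 3 };
	unsigned int *desc = e.hw_desc;		/* all the callback receives */
	struct edesc *back = (struct edesc *)((char *)desc -
			     offsetof(struct edesc, hw_desc));

	printf("recovered src_nents = %d\n", back->src_nents);	/* 3 */
	return 0;
}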
648 | |||
649 | static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | ||
650 | void *context) | ||
651 | { | ||
652 | struct ahash_request *req = context; | ||
653 | struct ahash_edesc *edesc; | ||
654 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
655 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
656 | #ifdef DEBUG | ||
657 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
658 | int digestsize = crypto_ahash_digestsize(ahash); | ||
659 | |||
660 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
661 | #endif | ||
662 | |||
663 | edesc = (struct ahash_edesc *)((char *)desc - | ||
664 | offsetof(struct ahash_edesc, hw_desc)); | ||
665 | if (err) { | ||
666 | char tmp[CAAM_ERROR_STR_MAX]; | ||
667 | |||
668 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
669 | } | ||
670 | |||
671 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); | ||
672 | kfree(edesc); | ||
673 | |||
674 | #ifdef DEBUG | ||
675 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | ||
676 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | ||
677 | ctx->ctx_len, 1); | ||
678 | if (req->result) | ||
679 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | ||
680 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | ||
681 | digestsize, 1); | ||
682 | #endif | ||
683 | |||
684 | req->base.complete(&req->base, err); | ||
685 | } | ||
686 | |||
687 | static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | ||
688 | void *context) | ||
689 | { | ||
690 | struct ahash_request *req = context; | ||
691 | struct ahash_edesc *edesc; | ||
692 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
693 | int digestsize = crypto_ahash_digestsize(ahash); | ||
694 | #ifdef DEBUG | ||
695 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
696 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
697 | |||
698 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
699 | #endif | ||
700 | |||
701 | edesc = (struct ahash_edesc *)((char *)desc - | ||
702 | offsetof(struct ahash_edesc, hw_desc)); | ||
703 | if (err) { | ||
704 | char tmp[CAAM_ERROR_STR_MAX]; | ||
705 | |||
706 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
707 | } | ||
708 | |||
709 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
710 | kfree(edesc); | ||
711 | |||
712 | #ifdef DEBUG | ||
713 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | ||
714 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | ||
715 | ctx->ctx_len, 1); | ||
716 | if (req->result) | ||
717 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | ||
718 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | ||
719 | digestsize, 1); | ||
720 | #endif | ||
721 | |||
722 | req->base.complete(&req->base, err); | ||
723 | } | ||
724 | |||
725 | static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | ||
726 | void *context) | ||
727 | { | ||
728 | struct ahash_request *req = context; | ||
729 | struct ahash_edesc *edesc; | ||
730 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
731 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
732 | #ifdef DEBUG | ||
733 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
734 | int digestsize = crypto_ahash_digestsize(ahash); | ||
735 | |||
736 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
737 | #endif | ||
738 | |||
739 | edesc = (struct ahash_edesc *)((char *)desc - | ||
740 | offsetof(struct ahash_edesc, hw_desc)); | ||
741 | if (err) { | ||
742 | char tmp[CAAM_ERROR_STR_MAX]; | ||
743 | |||
744 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
745 | } | ||
746 | |||
747 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); | ||
748 | kfree(edesc); | ||
749 | |||
750 | #ifdef DEBUG | ||
751 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | ||
752 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | ||
753 | ctx->ctx_len, 1); | ||
754 | if (req->result) | ||
755 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | ||
756 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | ||
757 | digestsize, 1); | ||
758 | #endif | ||
759 | |||
760 | req->base.complete(&req->base, err); | ||
761 | } | ||
762 | |||
763 | /* submit update job descriptor */ | ||
764 | static int ahash_update_ctx(struct ahash_request *req) | ||
765 | { | ||
766 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
767 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
768 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
769 | struct device *jrdev = ctx->jrdev; | ||
770 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
771 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
772 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
773 | int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; | ||
774 | u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; | ||
775 | int *next_buflen = state->current_buf ? &state->buflen_0 : | ||
776 | &state->buflen_1, last_buflen; | ||
777 | int in_len = *buflen + req->nbytes, to_hash; | ||
778 | u32 *sh_desc = ctx->sh_desc_update, *desc; | ||
779 | dma_addr_t ptr = ctx->sh_desc_update_dma; | ||
780 | int src_nents, sec4_sg_bytes, sec4_sg_src_index; | ||
781 | struct ahash_edesc *edesc; | ||
782 | bool chained = false; | ||
783 | int ret = 0; | ||
784 | int sh_len; | ||
785 | |||
786 | last_buflen = *next_buflen; | ||
787 | *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); | ||
788 | to_hash = in_len - *next_buflen; | ||
789 | |||
790 | if (to_hash) { | ||
791 | src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), | ||
792 | &chained); | ||
793 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); | ||
794 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | ||
795 | sizeof(struct sec4_sg_entry); | ||
796 | |||
797 | /* | ||
798 | * allocate space for base edesc and hw desc commands, | ||
799 | * link tables | ||
800 | */ | ||
801 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
802 | sec4_sg_bytes, GFP_DMA | flags); | ||
803 | if (!edesc) { | ||
804 | dev_err(jrdev, | ||
805 | "could not allocate extended descriptor\n"); | ||
806 | return -ENOMEM; | ||
807 | } | ||
808 | |||
809 | edesc->src_nents = src_nents; | ||
810 | edesc->chained = chained; | ||
811 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
812 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
813 | DESC_JOB_IO_LEN; | ||
814 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
815 | sec4_sg_bytes, | ||
816 | DMA_TO_DEVICE); | ||
817 | |||
818 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | ||
819 | edesc->sec4_sg, DMA_BIDIRECTIONAL); | ||
820 | |||
821 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, | ||
822 | edesc->sec4_sg + 1, | ||
823 | buf, state->buf_dma, | ||
824 | *buflen, last_buflen); | ||
825 | |||
826 | if (src_nents) { | ||
827 | src_map_to_sec4_sg(jrdev, req->src, src_nents, | ||
828 | edesc->sec4_sg + sec4_sg_src_index, | ||
829 | chained); | ||
830 | if (*next_buflen) { | ||
831 | sg_copy_part(next_buf, req->src, to_hash - | ||
832 | *buflen, req->nbytes); | ||
833 | state->current_buf = !state->current_buf; | ||
834 | } | ||
835 | } else { | ||
836 | (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= | ||
837 | SEC4_SG_LEN_FIN; | ||
838 | } | ||
839 | |||
840 | sh_len = desc_len(sh_desc); | ||
841 | desc = edesc->hw_desc; | ||
842 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
843 | HDR_REVERSE); | ||
844 | |||
845 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + | ||
846 | to_hash, LDST_SGF); | ||
847 | |||
848 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); | ||
849 | |||
850 | #ifdef DEBUG | ||
851 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
852 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
853 | desc_bytes(desc), 1); | ||
854 | #endif | ||
855 | |||
856 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); | ||
857 | if (!ret) { | ||
858 | ret = -EINPROGRESS; | ||
859 | } else { | ||
860 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | ||
861 | DMA_BIDIRECTIONAL); | ||
862 | kfree(edesc); | ||
863 | } | ||
864 | } else if (*next_buflen) { | ||
865 | sg_copy(buf + *buflen, req->src, req->nbytes); | ||
866 | *buflen = *next_buflen; | ||
867 | *next_buflen = last_buflen; | ||
868 | } | ||
869 | #ifdef DEBUG | ||
870 | print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", | ||
871 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | ||
872 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | ||
873 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | ||
874 | *next_buflen, 1); | ||
875 | #endif | ||
876 | |||
877 | return ret; | ||
878 | } | ||
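
The split at the top of ahash_update_ctx() keeps the hardware fed with whole blocks only: in_len & (blocksize - 1) isolates the unaligned tail, which is copied into the inactive buffer and carried into the next update. A standalone demo of the arithmetic (blocksize must be a power of two, which holds for every algorithm registered in this file):

#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 64;	/* e.g. SHA-256 block size */
	unsigned int buflen = 10;	/* bytes carried from last update */
	unsigned int nbytes = 200;	/* new bytes in this request */
	unsigned int in_len = buflen + nbytes;

	unsigned int next_buflen = in_len & (blocksize - 1);
	unsigned int to_hash = in_len - next_buflen;

	/* prints: in_len=210 to_hash=192 carried=18 */
	printf("in_len=%u to_hash=%u carried=%u\n",
	       in_len, to_hash, next_buflen);
	return 0;
}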
879 | |||
880 | static int ahash_final_ctx(struct ahash_request *req) | ||
881 | { | ||
882 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
883 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
884 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
885 | struct device *jrdev = ctx->jrdev; | ||
886 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
887 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
888 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
889 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | ||
890 | int last_buflen = state->current_buf ? state->buflen_0 : | ||
891 | state->buflen_1; | ||
892 | u32 *sh_desc = ctx->sh_desc_fin, *desc; | ||
893 | dma_addr_t ptr = ctx->sh_desc_fin_dma; | ||
894 | int sec4_sg_bytes; | ||
895 | int digestsize = crypto_ahash_digestsize(ahash); | ||
896 | struct ahash_edesc *edesc; | ||
897 | int ret = 0; | ||
898 | int sh_len; | ||
899 | |||
900 | sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); | ||
901 | |||
902 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
903 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
904 | sec4_sg_bytes, GFP_DMA | flags); | ||
905 | if (!edesc) { | ||
906 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
907 | return -ENOMEM; | ||
908 | } | ||
909 | |||
910 | sh_len = desc_len(sh_desc); | ||
911 | desc = edesc->hw_desc; | ||
912 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
913 | |||
914 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
915 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
916 | DESC_JOB_IO_LEN; | ||
917 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
918 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
919 | edesc->src_nents = 0; | ||
920 | |||
921 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, | ||
922 | DMA_TO_DEVICE); | ||
923 | |||
924 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, | ||
925 | buf, state->buf_dma, buflen, | ||
926 | last_buflen); | ||
927 | (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; | ||
928 | |||
929 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, | ||
930 | LDST_SGF); | ||
931 | |||
932 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
933 | digestsize); | ||
934 | |||
935 | #ifdef DEBUG | ||
936 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
937 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
938 | #endif | ||
939 | |||
940 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | ||
941 | if (!ret) { | ||
942 | ret = -EINPROGRESS; | ||
943 | } else { | ||
944 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
945 | kfree(edesc); | ||
946 | } | ||
947 | |||
948 | return ret; | ||
949 | } | ||
950 | |||
951 | static int ahash_finup_ctx(struct ahash_request *req) | ||
952 | { | ||
953 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
954 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
955 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
956 | struct device *jrdev = ctx->jrdev; | ||
957 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
958 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
959 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
960 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | ||
961 | int last_buflen = state->current_buf ? state->buflen_0 : | ||
962 | state->buflen_1; | ||
963 | u32 *sh_desc = ctx->sh_desc_finup, *desc; | ||
964 | dma_addr_t ptr = ctx->sh_desc_finup_dma; | ||
965 | int sec4_sg_bytes, sec4_sg_src_index; | ||
966 | int src_nents; | ||
967 | int digestsize = crypto_ahash_digestsize(ahash); | ||
968 | struct ahash_edesc *edesc; | ||
969 | bool chained = false; | ||
970 | int ret = 0; | ||
971 | int sh_len; | ||
972 | |||
973 | src_nents = __sg_count(req->src, req->nbytes, &chained); | ||
974 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); | ||
975 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | ||
976 | sizeof(struct sec4_sg_entry); | ||
977 | |||
978 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
979 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
980 | sec4_sg_bytes, GFP_DMA | flags); | ||
981 | if (!edesc) { | ||
982 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
983 | return -ENOMEM; | ||
984 | } | ||
985 | |||
986 | sh_len = desc_len(sh_desc); | ||
987 | desc = edesc->hw_desc; | ||
988 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
989 | |||
990 | edesc->src_nents = src_nents; | ||
991 | edesc->chained = chained; | ||
992 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
993 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
994 | DESC_JOB_IO_LEN; | ||
995 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
996 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
997 | |||
998 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, | ||
999 | DMA_TO_DEVICE); | ||
1000 | |||
1001 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, | ||
1002 | buf, state->buf_dma, buflen, | ||
1003 | last_buflen); | ||
1004 | |||
1005 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + | ||
1006 | sec4_sg_src_index, chained); | ||
1007 | |||
1008 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + | ||
1009 | buflen + req->nbytes, LDST_SGF); | ||
1010 | |||
1011 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
1012 | digestsize); | ||
1013 | |||
1014 | #ifdef DEBUG | ||
1015 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
1016 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
1017 | #endif | ||
1018 | |||
1019 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | ||
1020 | if (!ret) { | ||
1021 | ret = -EINPROGRESS; | ||
1022 | } else { | ||
1023 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
1024 | kfree(edesc); | ||
1025 | } | ||
1026 | |||
1027 | return ret; | ||
1028 | } | ||
1029 | |||
1030 | static int ahash_digest(struct ahash_request *req) | ||
1031 | { | ||
1032 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1033 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
1034 | struct device *jrdev = ctx->jrdev; | ||
1035 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1036 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
1037 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | ||
1038 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | ||
1039 | int digestsize = crypto_ahash_digestsize(ahash); | ||
1040 | int src_nents, sec4_sg_bytes; | ||
1041 | dma_addr_t src_dma; | ||
1042 | struct ahash_edesc *edesc; | ||
1043 | bool chained = false; | ||
1044 | int ret = 0; | ||
1045 | u32 options; | ||
1046 | int sh_len; | ||
1047 | |||
1048 | src_nents = sg_count(req->src, req->nbytes, &chained); | ||
1049 | dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE, | ||
1050 | chained); | ||
1051 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | ||
1052 | |||
1053 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
1054 | edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes + | ||
1055 | DESC_JOB_IO_LEN, GFP_DMA | flags); | ||
1056 | if (!edesc) { | ||
1057 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
1058 | return -ENOMEM; | ||
1059 | } | ||
1060 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
1061 | DESC_JOB_IO_LEN; | ||
1062 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1063 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1064 | edesc->src_nents = src_nents; | ||
1065 | edesc->chained = chained; | ||
edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
1066 | |||
1067 | sh_len = desc_len(sh_desc); | ||
1068 | desc = edesc->hw_desc; | ||
1069 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
1070 | |||
1071 | if (src_nents) { | ||
1072 | sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); | ||
1073 | src_dma = edesc->sec4_sg_dma; | ||
1074 | options = LDST_SGF; | ||
1075 | } else { | ||
1076 | src_dma = sg_dma_address(req->src); | ||
1077 | options = 0; | ||
1078 | } | ||
1079 | append_seq_in_ptr(desc, src_dma, req->nbytes, options); | ||
1080 | |||
1081 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
1082 | digestsize); | ||
1083 | |||
1084 | #ifdef DEBUG | ||
1085 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
1086 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
1087 | #endif | ||
1088 | |||
1089 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | ||
1090 | if (!ret) { | ||
1091 | ret = -EINPROGRESS; | ||
1092 | } else { | ||
1093 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
1094 | kfree(edesc); | ||
1095 | } | ||
1096 | |||
1097 | return ret; | ||
1098 | } | ||
1099 | |||
1100 | /* submit ahash final if it is the first job descriptor */ | ||
1101 | static int ahash_final_no_ctx(struct ahash_request *req) | ||
1102 | { | ||
1103 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1104 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
1105 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1106 | struct device *jrdev = ctx->jrdev; | ||
1107 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1108 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
1109 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
1110 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | ||
1111 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | ||
1112 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | ||
1113 | int digestsize = crypto_ahash_digestsize(ahash); | ||
1114 | struct ahash_edesc *edesc; | ||
1115 | int ret = 0; | ||
1116 | int sh_len; | ||
1117 | |||
1118 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
1119 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN, | ||
1120 | GFP_DMA | flags); | ||
1121 | if (!edesc) { | ||
1122 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
1123 | return -ENOMEM; | ||
1124 | } | ||
1125 | |||
1126 | sh_len = desc_len(sh_desc); | ||
1127 | desc = edesc->hw_desc; | ||
1128 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
1129 | |||
1130 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | ||
1131 | |||
1132 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); | ||
1133 | |||
1134 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
1135 | digestsize); | ||
1136 | edesc->src_nents = 0; | ||
edesc->sec4_sg_bytes = 0; | ||
1137 | |||
1138 | #ifdef DEBUG | ||
1139 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
1140 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
1141 | #endif | ||
1142 | |||
1143 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | ||
1144 | if (!ret) { | ||
1145 | ret = -EINPROGRESS; | ||
1146 | } else { | ||
1147 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
1148 | kfree(edesc); | ||
1149 | } | ||
1150 | |||
1151 | return ret; | ||
1152 | } | ||
1153 | |||
1154 | /* submit ahash update if it is the first job descriptor after update */ | ||
1155 | static int ahash_update_no_ctx(struct ahash_request *req) | ||
1156 | { | ||
1157 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1158 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
1159 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1160 | struct device *jrdev = ctx->jrdev; | ||
1161 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1162 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
1163 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
1164 | int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; | ||
1165 | u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; | ||
1166 | int *next_buflen = state->current_buf ? &state->buflen_0 : | ||
1167 | &state->buflen_1; | ||
1168 | int in_len = *buflen + req->nbytes, to_hash; | ||
1169 | int sec4_sg_bytes, src_nents; | ||
1170 | struct ahash_edesc *edesc; | ||
1171 | u32 *desc, *sh_desc = ctx->sh_desc_update_first; | ||
1172 | dma_addr_t ptr = ctx->sh_desc_update_first_dma; | ||
1173 | bool chained = false; | ||
1174 | int ret = 0; | ||
1175 | int sh_len; | ||
1176 | |||
1177 | *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); | ||
1178 | to_hash = in_len - *next_buflen; | ||
1179 | |||
1180 | if (to_hash) { | ||
1181 | src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), | ||
1182 | &chained); | ||
1183 | sec4_sg_bytes = (1 + src_nents) * | ||
1184 | sizeof(struct sec4_sg_entry); | ||
1185 | |||
1186 | /* | ||
1187 | * allocate space for base edesc and hw desc commands, | ||
1188 | * link tables | ||
1189 | */ | ||
1190 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
1191 | sec4_sg_bytes, GFP_DMA | flags); | ||
1192 | if (!edesc) { | ||
1193 | dev_err(jrdev, | ||
1194 | "could not allocate extended descriptor\n"); | ||
1195 | return -ENOMEM; | ||
1196 | } | ||
1197 | |||
1198 | edesc->src_nents = src_nents; | ||
1199 | edesc->chained = chained; | ||
1200 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
1201 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
1202 | DESC_JOB_IO_LEN; | ||
1203 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1204 | sec4_sg_bytes, | ||
1205 | DMA_TO_DEVICE); | ||
1206 | |||
1207 | state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, | ||
1208 | buf, *buflen); | ||
1209 | src_map_to_sec4_sg(jrdev, req->src, src_nents, | ||
1210 | edesc->sec4_sg + 1, chained); | ||
1211 | if (*next_buflen) { | ||
1212 | sg_copy_part(next_buf, req->src, to_hash - *buflen, | ||
1213 | req->nbytes); | ||
1214 | state->current_buf = !state->current_buf; | ||
1215 | } | ||
1216 | |||
1217 | sh_len = desc_len(sh_desc); | ||
1218 | desc = edesc->hw_desc; | ||
1219 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
1220 | HDR_REVERSE); | ||
1221 | |||
1222 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); | ||
1223 | |||
1224 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | ||
1225 | |||
1226 | #ifdef DEBUG | ||
1227 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
1228 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
1229 | desc_bytes(desc), 1); | ||
1230 | #endif | ||
1231 | |||
1232 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); | ||
1233 | if (!ret) { | ||
1234 | ret = -EINPROGRESS; | ||
1235 | state->update = ahash_update_ctx; | ||
1236 | state->finup = ahash_finup_ctx; | ||
1237 | state->final = ahash_final_ctx; | ||
1238 | } else { | ||
1239 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | ||
1240 | DMA_TO_DEVICE); | ||
1241 | kfree(edesc); | ||
1242 | } | ||
1243 | } else if (*next_buflen) { | ||
1244 | sg_copy(buf + *buflen, req->src, req->nbytes); | ||
1245 | *buflen = *next_buflen; | ||
1246 | *next_buflen = 0; | ||
1247 | } | ||
1248 | #ifdef DEBUG | ||
1249 | print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", | ||
1250 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | ||
1251 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | ||
1252 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | ||
1253 | *next_buflen, 1); | ||
1254 | #endif | ||
1255 | |||
1256 | return ret; | ||
1257 | } | ||
1258 | |||
1259 | /* submit ahash finup if it is the first job descriptor after update */ | ||
1260 | static int ahash_finup_no_ctx(struct ahash_request *req) | ||
1261 | { | ||
1262 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1263 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
1264 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1265 | struct device *jrdev = ctx->jrdev; | ||
1266 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1267 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
1268 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
1269 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | ||
1270 | int last_buflen = state->current_buf ? state->buflen_0 : | ||
1271 | state->buflen_1; | ||
1272 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | ||
1273 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | ||
1274 | int sec4_sg_bytes, sec4_sg_src_index, src_nents; | ||
1275 | int digestsize = crypto_ahash_digestsize(ahash); | ||
1276 | struct ahash_edesc *edesc; | ||
1277 | bool chained = false; | ||
1278 | int sh_len; | ||
1279 | int ret = 0; | ||
1280 | |||
1281 | src_nents = __sg_count(req->src, req->nbytes, &chained); | ||
1282 | sec4_sg_src_index = 2; | ||
1283 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | ||
1284 | sizeof(struct sec4_sg_entry); | ||
1285 | |||
1286 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
1287 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
1288 | sec4_sg_bytes, GFP_DMA | flags); | ||
1289 | if (!edesc) { | ||
1290 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
1291 | return -ENOMEM; | ||
1292 | } | ||
1293 | |||
1294 | sh_len = desc_len(sh_desc); | ||
1295 | desc = edesc->hw_desc; | ||
1296 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
1297 | |||
1298 | edesc->src_nents = src_nents; | ||
1299 | edesc->chained = chained; | ||
1300 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
1301 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
1302 | DESC_JOB_IO_LEN; | ||
1303 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1304 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1305 | |||
1306 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, | ||
1307 | state->buf_dma, buflen, | ||
1308 | last_buflen); | ||
1309 | |||
1310 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, | ||
1311 | chained); | ||
1312 | |||
1313 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + | ||
1314 | req->nbytes, LDST_SGF); | ||
1315 | |||
1316 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
1317 | digestsize); | ||
1318 | |||
1319 | #ifdef DEBUG | ||
1320 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
1321 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
1322 | #endif | ||
1323 | |||
1324 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | ||
1325 | if (!ret) { | ||
1326 | ret = -EINPROGRESS; | ||
1327 | } else { | ||
1328 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
1329 | kfree(edesc); | ||
1330 | } | ||
1331 | |||
1332 | return ret; | ||
1333 | } | ||
1334 | |||
1335 | /* submit first update job descriptor after init */ | ||
1336 | static int ahash_update_first(struct ahash_request *req) | ||
1337 | { | ||
1338 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1339 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
1340 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1341 | struct device *jrdev = ctx->jrdev; | ||
1342 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1343 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
1344 | u8 *next_buf = state->buf_0 + state->current_buf * | ||
1345 | CAAM_MAX_HASH_BLOCK_SIZE; | ||
1346 | int *next_buflen = &state->buflen_0 + state->current_buf; | ||
1347 | int to_hash; | ||
1348 | u32 *sh_desc = ctx->sh_desc_update_first, *desc; | ||
1349 | dma_addr_t ptr = ctx->sh_desc_update_first_dma; | ||
1350 | int sec4_sg_bytes, src_nents; | ||
1351 | dma_addr_t src_dma; | ||
1352 | u32 options; | ||
1353 | struct ahash_edesc *edesc; | ||
1354 | bool chained = false; | ||
1355 | int ret = 0; | ||
1356 | int sh_len; | ||
1357 | |||
1358 | *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - | ||
1359 | 1); | ||
1360 | to_hash = req->nbytes - *next_buflen; | ||
1361 | |||
1362 | if (to_hash) { | ||
1363 | src_nents = sg_count(req->src, req->nbytes - (*next_buflen), | ||
1364 | &chained); | ||
1365 | dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
1366 | DMA_TO_DEVICE, chained); | ||
1367 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | ||
1368 | |||
1369 | /* | ||
1370 | * allocate space for base edesc and hw desc commands, | ||
1371 | * link tables | ||
1372 | */ | ||
1373 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
1374 | sec4_sg_bytes, GFP_DMA | flags); | ||
1375 | if (!edesc) { | ||
1376 | dev_err(jrdev, | ||
1377 | "could not allocate extended descriptor\n"); | ||
1378 | return -ENOMEM; | ||
1379 | } | ||
1380 | |||
1381 | edesc->src_nents = src_nents; | ||
1382 | edesc->chained = chained; | ||
1383 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
1384 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
1385 | DESC_JOB_IO_LEN; | ||
1386 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1387 | sec4_sg_bytes, | ||
1388 | DMA_TO_DEVICE); | ||
1389 | |||
1390 | if (src_nents) { | ||
1391 | sg_to_sec4_sg_last(req->src, src_nents, | ||
1392 | edesc->sec4_sg, 0); | ||
1393 | src_dma = edesc->sec4_sg_dma; | ||
1394 | options = LDST_SGF; | ||
1395 | } else { | ||
1396 | src_dma = sg_dma_address(req->src); | ||
1397 | options = 0; | ||
1398 | } | ||
1399 | |||
1400 | if (*next_buflen) | ||
1401 | sg_copy_part(next_buf, req->src, to_hash, req->nbytes); | ||
1402 | |||
1403 | sh_len = desc_len(sh_desc); | ||
1404 | desc = edesc->hw_desc; | ||
1405 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
1406 | HDR_REVERSE); | ||
1407 | |||
1408 | append_seq_in_ptr(desc, src_dma, to_hash, options); | ||
1409 | |||
1410 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | ||
1411 | |||
1412 | #ifdef DEBUG | ||
1413 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
1414 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
1415 | desc_bytes(desc), 1); | ||
1416 | #endif | ||
1417 | |||
1418 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, | ||
1419 | req); | ||
1420 | if (!ret) { | ||
1421 | ret = -EINPROGRESS; | ||
1422 | state->update = ahash_update_ctx; | ||
1423 | state->finup = ahash_finup_ctx; | ||
1424 | state->final = ahash_final_ctx; | ||
1425 | } else { | ||
1426 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | ||
1427 | DMA_TO_DEVICE); | ||
1428 | kfree(edesc); | ||
1429 | } | ||
1430 | } else if (*next_buflen) { | ||
1431 | state->update = ahash_update_no_ctx; | ||
1432 | state->finup = ahash_finup_no_ctx; | ||
1433 | state->final = ahash_final_no_ctx; | ||
1434 | sg_copy(next_buf, req->src, req->nbytes); | ||
1435 | } | ||
1436 | #ifdef DEBUG | ||
1437 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | ||
1438 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | ||
1439 | *next_buflen, 1); | ||
1440 | #endif | ||
1441 | |||
1442 | return ret; | ||
1443 | } | ||
1444 | |||
1445 | static int ahash_finup_first(struct ahash_request *req) | ||
1446 | { | ||
1447 | return ahash_digest(req); | ||
1448 | } | ||
1449 | |||
1450 | static int ahash_init(struct ahash_request *req) | ||
1451 | { | ||
1452 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1453 | |||
1454 | state->update = ahash_update_first; | ||
1455 | state->finup = ahash_finup_first; | ||
1456 | state->final = ahash_final_no_ctx; | ||
1457 | |||
1458 | state->current_buf = 0; | ||
1459 | |||
1460 | return 0; | ||
1461 | } | ||
1462 | |||
1463 | static int ahash_update(struct ahash_request *req) | ||
1464 | { | ||
1465 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1466 | |||
1467 | return state->update(req); | ||
1468 | } | ||
1469 | |||
1470 | static int ahash_finup(struct ahash_request *req) | ||
1471 | { | ||
1472 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1473 | |||
1474 | return state->finup(req); | ||
1475 | } | ||
1476 | |||
1477 | static int ahash_final(struct ahash_request *req) | ||
1478 | { | ||
1479 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1480 | |||
1481 | return state->final(req); | ||
1482 | } | ||
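
ahash_init(), ahash_update_first() and ahash_update_no_ctx() together drive a small state machine through the update/finup/final function pointers dispatched above. The transitions, reconstructed from the assignments in this file:

/*
 *   ahash_init():
 *       update = ahash_update_first
 *       finup  = ahash_finup_first
 *       final  = ahash_final_no_ctx
 *
 *   ahash_update_first() / ahash_update_no_ctx(), job submitted
 *   (the running hash state now lives in the CAAM context):
 *       update = ahash_update_ctx
 *       finup  = ahash_finup_ctx
 *       final  = ahash_final_ctx
 *
 *   ahash_update_first(), input fully buffered, no job submitted:
 *       update = ahash_update_no_ctx
 *       finup  = ahash_finup_no_ctx
 *       final  = ahash_final_no_ctx
 */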
1483 | |||
1484 | static int ahash_export(struct ahash_request *req, void *out) | ||
1485 | { | ||
1486 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1487 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
1488 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1489 | |||
1490 | memcpy(out, ctx, sizeof(struct caam_hash_ctx)); | ||
1491 | memcpy(out + sizeof(struct caam_hash_ctx), state, | ||
1492 | sizeof(struct caam_hash_state)); | ||
1493 | return 0; | ||
1494 | } | ||
1495 | |||
1496 | static int ahash_import(struct ahash_request *req, const void *in) | ||
1497 | { | ||
1498 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1499 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
1500 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
1501 | |||
1502 | memcpy(ctx, in, sizeof(struct caam_hash_ctx)); | ||
1503 | memcpy(state, in + sizeof(struct caam_hash_ctx), | ||
1504 | sizeof(struct caam_hash_state)); | ||
1505 | return 0; | ||
1506 | } | ||
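
ahash_export()/ahash_import() serialize the transform context and the per-request state back-to-back, so a caller can checkpoint a partial hash and resume it on another request. A hedged usage sketch from the caller's side; crypto_ahash_export()/import() are standard crypto API calls, and sizing the blob with crypto_ahash_statesize() is our assumption about how the buffer should be allocated:

#include <crypto/hash.h>

/* Checkpoint req1's partial digest and resume it on req2. Error
 * handling trimmed; 'blob' must hold crypto_ahash_statesize() bytes.
 */
static int checkpoint_hash(struct ahash_request *req1,
			   struct ahash_request *req2, void *blob)
{
	int ret = crypto_ahash_export(req1, blob);

	if (ret)
		return ret;
	return crypto_ahash_import(req2, blob);
}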
1507 | |||
1508 | struct caam_hash_template { | ||
1509 | char name[CRYPTO_MAX_ALG_NAME]; | ||
1510 | char driver_name[CRYPTO_MAX_ALG_NAME]; | ||
1511 | char hmac_name[CRYPTO_MAX_ALG_NAME]; | ||
1512 | char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; | ||
1513 | unsigned int blocksize; | ||
1514 | struct ahash_alg template_ahash; | ||
1515 | u32 alg_type; | ||
1516 | u32 alg_op; | ||
1517 | }; | ||
1518 | |||
1519 | /* ahash descriptors */ | ||
1520 | static struct caam_hash_template driver_hash[] = { | ||
1521 | { | ||
1522 | .name = "sha1", | ||
1523 | .driver_name = "sha1-caam", | ||
1524 | .hmac_name = "hmac(sha1)", | ||
1525 | .hmac_driver_name = "hmac-sha1-caam", | ||
1526 | .blocksize = SHA1_BLOCK_SIZE, | ||
1527 | .template_ahash = { | ||
1528 | .init = ahash_init, | ||
1529 | .update = ahash_update, | ||
1530 | .final = ahash_final, | ||
1531 | .finup = ahash_finup, | ||
1532 | .digest = ahash_digest, | ||
1533 | .export = ahash_export, | ||
1534 | .import = ahash_import, | ||
1535 | .setkey = ahash_setkey, | ||
1536 | .halg = { | ||
1537 | .digestsize = SHA1_DIGEST_SIZE, | ||
1538 | }, | ||
1539 | }, | ||
1540 | .alg_type = OP_ALG_ALGSEL_SHA1, | ||
1541 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
1542 | }, { | ||
1543 | .name = "sha224", | ||
1544 | .driver_name = "sha224-caam", | ||
1545 | .hmac_name = "hmac(sha224)", | ||
1546 | .hmac_driver_name = "hmac-sha224-caam", | ||
1547 | .blocksize = SHA224_BLOCK_SIZE, | ||
1548 | .template_ahash = { | ||
1549 | .init = ahash_init, | ||
1550 | .update = ahash_update, | ||
1551 | .final = ahash_final, | ||
1552 | .finup = ahash_finup, | ||
1553 | .digest = ahash_digest, | ||
1554 | .export = ahash_export, | ||
1555 | .import = ahash_import, | ||
1556 | .setkey = ahash_setkey, | ||
1557 | .halg = { | ||
1558 | .digestsize = SHA224_DIGEST_SIZE, | ||
1559 | }, | ||
1560 | }, | ||
1561 | .alg_type = OP_ALG_ALGSEL_SHA224, | ||
1562 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
1563 | }, { | ||
1564 | .name = "sha256", | ||
1565 | .driver_name = "sha256-caam", | ||
1566 | .hmac_name = "hmac(sha256)", | ||
1567 | .hmac_driver_name = "hmac-sha256-caam", | ||
1568 | .blocksize = SHA256_BLOCK_SIZE, | ||
1569 | .template_ahash = { | ||
1570 | .init = ahash_init, | ||
1571 | .update = ahash_update, | ||
1572 | .final = ahash_final, | ||
1573 | .finup = ahash_finup, | ||
1574 | .digest = ahash_digest, | ||
1575 | .export = ahash_export, | ||
1576 | .import = ahash_import, | ||
1577 | .setkey = ahash_setkey, | ||
1578 | .halg = { | ||
1579 | .digestsize = SHA256_DIGEST_SIZE, | ||
1580 | }, | ||
1581 | }, | ||
1582 | .alg_type = OP_ALG_ALGSEL_SHA256, | ||
1583 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
1584 | }, { | ||
1585 | .name = "sha384", | ||
1586 | .driver_name = "sha384-caam", | ||
1587 | .hmac_name = "hmac(sha384)", | ||
1588 | .hmac_driver_name = "hmac-sha384-caam", | ||
1589 | .blocksize = SHA384_BLOCK_SIZE, | ||
1590 | .template_ahash = { | ||
1591 | .init = ahash_init, | ||
1592 | .update = ahash_update, | ||
1593 | .final = ahash_final, | ||
1594 | .finup = ahash_finup, | ||
1595 | .digest = ahash_digest, | ||
1596 | .export = ahash_export, | ||
1597 | .import = ahash_import, | ||
1598 | .setkey = ahash_setkey, | ||
1599 | .halg = { | ||
1600 | .digestsize = SHA384_DIGEST_SIZE, | ||
1601 | }, | ||
1602 | }, | ||
1603 | .alg_type = OP_ALG_ALGSEL_SHA384, | ||
1604 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
1605 | }, { | ||
1606 | .name = "sha512", | ||
1607 | .driver_name = "sha512-caam", | ||
1608 | .hmac_name = "hmac(sha512)", | ||
1609 | .hmac_driver_name = "hmac-sha512-caam", | ||
1610 | .blocksize = SHA512_BLOCK_SIZE, | ||
1611 | .template_ahash = { | ||
1612 | .init = ahash_init, | ||
1613 | .update = ahash_update, | ||
1614 | .final = ahash_final, | ||
1615 | .finup = ahash_finup, | ||
1616 | .digest = ahash_digest, | ||
1617 | .export = ahash_export, | ||
1618 | .import = ahash_import, | ||
1619 | .setkey = ahash_setkey, | ||
1620 | .halg = { | ||
1621 | .digestsize = SHA512_DIGEST_SIZE, | ||
1622 | }, | ||
1623 | }, | ||
1624 | .alg_type = OP_ALG_ALGSEL_SHA512, | ||
1625 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
1626 | }, { | ||
1627 | .name = "md5", | ||
1628 | .driver_name = "md5-caam", | ||
1629 | .hmac_name = "hmac(md5)", | ||
1630 | .hmac_driver_name = "hmac-md5-caam", | ||
1631 | .blocksize = MD5_BLOCK_WORDS * 4, | ||
1632 | .template_ahash = { | ||
1633 | .init = ahash_init, | ||
1634 | .update = ahash_update, | ||
1635 | .final = ahash_final, | ||
1636 | .finup = ahash_finup, | ||
1637 | .digest = ahash_digest, | ||
1638 | .export = ahash_export, | ||
1639 | .import = ahash_import, | ||
1640 | .setkey = ahash_setkey, | ||
1641 | .halg = { | ||
1642 | .digestsize = MD5_DIGEST_SIZE, | ||
1643 | }, | ||
1644 | }, | ||
1645 | .alg_type = OP_ALG_ALGSEL_MD5, | ||
1646 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
1647 | }, | ||
1648 | }; | ||
1649 | |||
1650 | struct caam_hash_alg { | ||
1651 | struct list_head entry; | ||
1652 | struct device *ctrldev; | ||
1653 | int alg_type; | ||
1654 | int alg_op; | ||
1655 | struct ahash_alg ahash_alg; | ||
1656 | }; | ||
1657 | |||
1658 | static int caam_hash_cra_init(struct crypto_tfm *tfm) | ||
1659 | { | ||
1660 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
1661 | struct crypto_alg *base = tfm->__crt_alg; | ||
1662 | struct hash_alg_common *halg = | ||
1663 | container_of(base, struct hash_alg_common, base); | ||
1664 | struct ahash_alg *alg = | ||
1665 | container_of(halg, struct ahash_alg, halg); | ||
1666 | struct caam_hash_alg *caam_hash = | ||
1667 | container_of(alg, struct caam_hash_alg, ahash_alg); | ||
1668 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1669 | struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev); | ||
1670 | /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ | ||
1671 | static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, | ||
1672 | HASH_MSG_LEN + SHA1_DIGEST_SIZE, | ||
1673 | HASH_MSG_LEN + 32, | ||
1674 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, | ||
1675 | HASH_MSG_LEN + 64, | ||
1676 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; | ||
1677 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | ||
1678 | int ret = 0; | ||
1679 | |||
1680 | /* | ||
1681 | * distribute tfms across job rings to ensure in-order | ||
1682 | * crypto request processing per tfm | ||
1683 | */ | ||
1684 | ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs]; | ||
1685 | |||
1686 | /* copy descriptor header template value */ | ||
1687 | ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; | ||
1688 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; | ||
1689 | |||
1690 | ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | ||
1691 | OP_ALG_ALGSEL_SHIFT]; | ||
1692 | |||
1693 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1694 | sizeof(struct caam_hash_state)); | ||
1695 | |||
1696 | ret = ahash_set_sh_desc(ahash); | ||
1697 | |||
1698 | return ret; | ||
1699 | } | ||
1700 | |||
1701 | static void caam_hash_cra_exit(struct crypto_tfm *tfm) | ||
1702 | { | ||
1703 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1704 | |||
1705 | if (ctx->sh_desc_update_dma && | ||
1706 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) | ||
1707 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, | ||
1708 | desc_bytes(ctx->sh_desc_update), | ||
1709 | DMA_TO_DEVICE); | ||
1710 | if (ctx->sh_desc_update_first_dma && | ||
1711 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) | ||
1712 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, | ||
1713 | desc_bytes(ctx->sh_desc_update_first), | ||
1714 | DMA_TO_DEVICE); | ||
1715 | if (ctx->sh_desc_fin_dma && | ||
1716 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) | ||
1717 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, | ||
1718 | desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); | ||
1719 | if (ctx->sh_desc_digest_dma && | ||
1720 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) | ||
1721 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, | ||
1722 | desc_bytes(ctx->sh_desc_digest), | ||
1723 | DMA_TO_DEVICE); | ||
1724 | if (ctx->sh_desc_finup_dma && | ||
1725 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) | ||
1726 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, | ||
1727 | desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); | ||
1728 | } | ||
1729 | |||
1730 | static void __exit caam_algapi_hash_exit(void) | ||
1731 | { | ||
1732 | struct device_node *dev_node; | ||
1733 | struct platform_device *pdev; | ||
1734 | struct device *ctrldev; | ||
1735 | struct caam_drv_private *priv; | ||
1736 | struct caam_hash_alg *t_alg, *n; | ||
1737 | |||
1738 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
1739 | if (!dev_node) | ||
1740 | return; | ||
1741 | |||
1742 | pdev = of_find_device_by_node(dev_node); | ||
1743 | if (!pdev) | ||
1744 | return; | ||
1745 | |||
1746 | ctrldev = &pdev->dev; | ||
1747 | of_node_put(dev_node); | ||
1748 | priv = dev_get_drvdata(ctrldev); | ||
1749 | |||
1750 | if (!priv->hash_list.next) | ||
1751 | return; | ||
1752 | |||
1753 | list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) { | ||
1754 | crypto_unregister_ahash(&t_alg->ahash_alg); | ||
1755 | list_del(&t_alg->entry); | ||
1756 | kfree(t_alg); | ||
1757 | } | ||
1758 | } | ||
1759 | |||
1760 | static struct caam_hash_alg * | ||
1761 | caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, | ||
1762 | bool keyed) | ||
1763 | { | ||
1764 | struct caam_hash_alg *t_alg; | ||
1765 | struct ahash_alg *halg; | ||
1766 | struct crypto_alg *alg; | ||
1767 | |||
1768 | t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); | ||
1769 | if (!t_alg) { | ||
1770 | dev_err(ctrldev, "failed to allocate t_alg\n"); | ||
1771 | return ERR_PTR(-ENOMEM); | ||
1772 | } | ||
1773 | |||
1774 | t_alg->ahash_alg = template->template_ahash; | ||
1775 | halg = &t_alg->ahash_alg; | ||
1776 | alg = &halg->halg.base; | ||
1777 | |||
1778 | if (keyed) { | ||
1779 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
1780 | template->hmac_name); | ||
1781 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
1782 | template->hmac_driver_name); | ||
1783 | } else { | ||
1784 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
1785 | template->name); | ||
1786 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
1787 | template->driver_name); | ||
1788 | } | ||
1789 | alg->cra_module = THIS_MODULE; | ||
1790 | alg->cra_init = caam_hash_cra_init; | ||
1791 | alg->cra_exit = caam_hash_cra_exit; | ||
1792 | alg->cra_ctxsize = sizeof(struct caam_hash_ctx); | ||
1793 | alg->cra_priority = CAAM_CRA_PRIORITY; | ||
1794 | alg->cra_blocksize = template->blocksize; | ||
1795 | alg->cra_alignmask = 0; | ||
1796 | alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH; | ||
1797 | alg->cra_type = &crypto_ahash_type; | ||
1798 | |||
1799 | t_alg->alg_type = template->alg_type; | ||
1800 | t_alg->alg_op = template->alg_op; | ||
1801 | t_alg->ctrldev = ctrldev; | ||
1802 | |||
1803 | return t_alg; | ||
1804 | } | ||
1805 | |||
1806 | static int __init caam_algapi_hash_init(void) | ||
1807 | { | ||
1808 | struct device_node *dev_node; | ||
1809 | struct platform_device *pdev; | ||
1810 | struct device *ctrldev; | ||
1811 | struct caam_drv_private *priv; | ||
1812 | int i = 0, err = 0; | ||
1813 | |||
1814 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
1815 | if (!dev_node) | ||
1816 | return -ENODEV; | ||
1817 | |||
1818 | pdev = of_find_device_by_node(dev_node); | ||
1819 | if (!pdev) | ||
1820 | return -ENODEV; | ||
1821 | |||
1822 | ctrldev = &pdev->dev; | ||
1823 | priv = dev_get_drvdata(ctrldev); | ||
1824 | of_node_put(dev_node); | ||
1825 | |||
1826 | INIT_LIST_HEAD(&priv->hash_list); | ||
1827 | |||
1828 | atomic_set(&priv->tfm_count, -1); | ||
1829 | |||
1830 | /* register crypto algorithms the device supports */ | ||
1831 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { | ||
1832 | /* TODO: check if h/w supports alg */ | ||
1833 | struct caam_hash_alg *t_alg; | ||
1834 | |||
1835 | /* register hmac version */ | ||
1836 | t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true); | ||
1837 | if (IS_ERR(t_alg)) { | ||
1838 | err = PTR_ERR(t_alg); | ||
1839 | dev_warn(ctrldev, "%s alg allocation failed\n", | ||
1840 | driver_hash[i].driver_name); | ||
1841 | continue; | ||
1842 | } | ||
1843 | |||
1844 | err = crypto_register_ahash(&t_alg->ahash_alg); | ||
1845 | if (err) { | ||
1846 | dev_warn(ctrldev, "%s alg registration failed\n", | ||
1847 | t_alg->ahash_alg.halg.base.cra_driver_name); | ||
1848 | kfree(t_alg); | ||
1849 | } else | ||
1850 | list_add_tail(&t_alg->entry, &priv->hash_list); | ||
1851 | |||
1852 | /* register unkeyed version */ | ||
1853 | t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false); | ||
1854 | if (IS_ERR(t_alg)) { | ||
1855 | err = PTR_ERR(t_alg); | ||
1856 | dev_warn(ctrldev, "%s alg allocation failed\n", | ||
1857 | driver_hash[i].driver_name); | ||
1858 | continue; | ||
1859 | } | ||
1860 | |||
1861 | err = crypto_register_ahash(&t_alg->ahash_alg); | ||
1862 | if (err) { | ||
1863 | dev_warn(ctrldev, "%s alg registration failed\n", | ||
1864 | t_alg->ahash_alg.halg.base.cra_driver_name); | ||
1865 | kfree(t_alg); | ||
1866 | } else | ||
1867 | list_add_tail(&t_alg->entry, &priv->hash_list); | ||
1868 | } | ||
1869 | |||
1870 | return err; | ||
1871 | } | ||
1872 | |||
1873 | module_init(caam_algapi_hash_init); | ||
1874 | module_exit(caam_algapi_hash_exit); | ||
1875 | |||
1876 | MODULE_LICENSE("GPL"); | ||
1877 | MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); | ||
1878 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); | ||
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c new file mode 100644 index 000000000000..e2bfe161dece --- /dev/null +++ b/drivers/crypto/caam/caamrng.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* | ||
2 | * caam - Freescale FSL CAAM support for hw_random | ||
3 | * | ||
4 | * Copyright 2011 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | * Based on caamalg.c crypto API driver. | ||
7 | * | ||
8 | * relationship between job descriptors to shared descriptors: | ||
9 | * | ||
10 | * --------------- -------------- | ||
11 | * | JobDesc #0 |-------------------->| ShareDesc | | ||
12 | * | *(buffer 0) | |------------->| (generate) | | ||
13 | * --------------- | | (move) | | ||
14 | * | | (store) | | ||
15 | * --------------- | -------------- | ||
16 | * | JobDesc #1 |------| | ||
17 | * | *(buffer 1) | | ||
18 | * --------------- | ||
19 | * | ||
20 | * A job desc looks like this: | ||
21 | * | ||
22 | * --------------------- | ||
23 | * | Header | | ||
24 | * | ShareDesc Pointer | | ||
25 | * | SEQ_OUT_PTR | | ||
26 | * | (output buffer) | | ||
27 | * --------------------- | ||
28 | * | ||
29 | * The SharedDesc never changes, and each job descriptor points to one of two | ||
30 | * buffers for each device, from which the data will be copied into the | ||
31 | * requested destination | ||
32 | */ | ||
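
The comment above describes a classic double-buffering scheme. A user-space sketch of the same bookkeeping, with a hypothetical blocking fill_buffer() standing in for the CAAM job; the real driver submits the refill asynchronously and, roughly speaking, only waits on a completion when the buffer it wants to drain is still empty:

#include <string.h>

#define BUF_SZ 4096

struct rng_buf {
	unsigned char data[BUF_SZ];
	size_t avail;			/* unread bytes left in data[] */
};

static struct rng_buf bufs[2];
static int cur;				/* buffer being drained */

extern void fill_buffer(struct rng_buf *b);	/* stand-in for a CAAM job */

static size_t read_random(unsigned char *dst, size_t len)
{
	size_t copied = 0;

	while (copied < len) {
		struct rng_buf *b = &bufs[cur];

		if (!b->avail) {	/* drained: refill it, drain peer */
			fill_buffer(b);
			b->avail = BUF_SZ;
			cur = !cur;
			continue;
		}

		size_t n = len - copied < b->avail ? len - copied : b->avail;

		memcpy(dst + copied, b->data + (BUF_SZ - b->avail), n);
		b->avail -= n;
		copied += n;
	}
	return copied;
}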
33 | |||
34 | #include <linux/hw_random.h> | ||
35 | #include <linux/completion.h> | ||
36 | #include <linux/atomic.h> | ||
37 | |||
38 | #include "compat.h" | ||
39 | |||
40 | #include "regs.h" | ||
41 | #include "intern.h" | ||
42 | #include "desc_constr.h" | ||
43 | #include "jr.h" | ||
44 | #include "error.h" | ||
45 | |||
46 | /* | ||
47 | * Maximum buffer size: maximum number of random, cache-aligned bytes that | ||
48 | * will be generated and moved to seq out ptr (extlen not allowed) | ||
49 | */ | ||
50 | #define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \ | ||
51 | L1_CACHE_BYTES) | ||
52 | |||
53 | /* length of descriptors */ | ||
54 | #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) | ||
55 | #define DESC_RNG_LEN (10 * CAAM_CMD_SZ) | ||
56 | |||
57 | /* Buffer, its dma address and completion status */ | ||
58 | struct buf_data { | ||
59 | u8 buf[RN_BUF_SIZE]; | ||
60 | dma_addr_t addr; | ||
61 | struct completion filled; | ||
62 | u32 hw_desc[DESC_JOB_O_LEN]; | ||
63 | #define BUF_NOT_EMPTY 0 | ||
64 | #define BUF_EMPTY 1 | ||
65 | #define BUF_PENDING 2 /* Empty, but with job pending -- don't submit another */ | ||
66 | atomic_t empty; | ||
67 | }; | ||
68 | |||
69 | /* rng per-device context */ | ||
70 | struct caam_rng_ctx { | ||
71 | struct device *jrdev; | ||
72 | dma_addr_t sh_desc_dma; | ||
73 | u32 sh_desc[DESC_RNG_LEN]; | ||
74 | unsigned int cur_buf_idx; | ||
75 | int current_buf; | ||
76 | struct buf_data bufs[2]; | ||
77 | }; | ||
78 | |||
79 | static struct caam_rng_ctx rng_ctx; | ||
80 | |||
81 | static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd) | ||
82 | { | ||
83 | if (bd->addr) | ||
84 | dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE, | ||
85 | DMA_FROM_DEVICE); | ||
86 | } | ||
87 | |||
88 | static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx) | ||
89 | { | ||
90 | struct device *jrdev = ctx->jrdev; | ||
91 | |||
92 | if (ctx->sh_desc_dma) | ||
93 | dma_unmap_single(jrdev, ctx->sh_desc_dma, DESC_RNG_LEN, | ||
94 | DMA_TO_DEVICE); | ||
95 | rng_unmap_buf(jrdev, &ctx->bufs[0]); | ||
96 | rng_unmap_buf(jrdev, &ctx->bufs[1]); | ||
97 | } | ||
98 | |||
99 | static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) | ||
100 | { | ||
101 | struct buf_data *bd; | ||
102 | |||
103 | bd = (struct buf_data *)((char *)desc - | ||
104 | offsetof(struct buf_data, hw_desc)); | ||
105 | |||
106 | if (err) { | ||
107 | char tmp[CAAM_ERROR_STR_MAX]; | ||
108 | |||
109 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
110 | } | ||
111 | |||
112 | atomic_set(&bd->empty, BUF_NOT_EMPTY); | ||
113 | complete(&bd->filled); | ||
114 | #ifdef DEBUG | ||
115 | print_hex_dump(KERN_ERR, "rng refreshed buf@: ", | ||
116 | DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); | ||
117 | #endif | ||
118 | } | ||
119 | |||
120 | static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) | ||
121 | { | ||
122 | struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)]; | ||
123 | struct device *jrdev = ctx->jrdev; | ||
124 | u32 *desc = bd->hw_desc; | ||
125 | int err; | ||
126 | |||
127 | dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf)); | ||
128 | init_completion(&bd->filled); | ||
129 | err = caam_jr_enqueue(jrdev, desc, rng_done, ctx); | ||
130 | if (err) | ||
131 | complete(&bd->filled); /* don't wait on failed job */ | ||
132 | else | ||
133 | atomic_inc(&bd->empty); /* mark the buffer as pending */ | ||
134 | |||
135 | return err; | ||
136 | } | ||
137 | |||
138 | static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait) | ||
139 | { | ||
140 | struct caam_rng_ctx *ctx = &rng_ctx; | ||
141 | struct buf_data *bd = &ctx->bufs[ctx->current_buf]; | ||
142 | int next_buf_idx, copied_idx; | ||
143 | int err; | ||
144 | |||
145 | if (atomic_read(&bd->empty)) { | ||
146 | /* submit a job if one isn't already pending */ | ||
147 | if (atomic_read(&bd->empty) == BUF_EMPTY) { | ||
148 | err = submit_job(ctx, 1); | ||
149 | /* if can't submit job, can't even wait */ | ||
150 | if (err) | ||
151 | return 0; | ||
152 | } | ||
153 | /* no immediate data, so exit if not waiting */ | ||
154 | if (!wait) | ||
155 | return 0; | ||
156 | |||
157 | /* waiting for pending job */ | ||
158 | if (atomic_read(&bd->empty)) | ||
159 | wait_for_completion(&bd->filled); | ||
160 | } | ||
161 | |||
162 | next_buf_idx = ctx->cur_buf_idx + max; | ||
163 | dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n", | ||
164 | __func__, ctx->current_buf, ctx->cur_buf_idx); | ||
165 | |||
166 | /* if enough data in current buffer */ | ||
167 | if (next_buf_idx < RN_BUF_SIZE) { | ||
168 | memcpy(data, bd->buf + ctx->cur_buf_idx, max); | ||
169 | ctx->cur_buf_idx = next_buf_idx; | ||
170 | return max; | ||
171 | } | ||
172 | |||
173 | /* else, copy what's left... */ | ||
174 | copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx; | ||
175 | memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx); | ||
176 | ctx->cur_buf_idx = 0; | ||
177 | atomic_set(&bd->empty, BUF_EMPTY); | ||
178 | |||
179 | /* ...refill... */ | ||
180 | submit_job(ctx, 1); | ||
181 | |||
182 | /* and use next buffer */ | ||
183 | ctx->current_buf = !ctx->current_buf; | ||
184 | dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf); | ||
185 | |||
186 | /* since there already is some data read, don't wait */ | ||
187 | return copied_idx + caam_read(rng, data + copied_idx, | ||
188 | max - copied_idx, false); | ||
189 | } | ||
190 | |||
191 | static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx) | ||
192 | { | ||
193 | struct device *jrdev = ctx->jrdev; | ||
194 | u32 *desc = ctx->sh_desc; | ||
195 | |||
196 | init_sh_desc(desc, HDR_SHARE_WAIT); | ||
197 | |||
198 | /* Propagate errors from shared to job descriptor */ | ||
199 | append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); | ||
200 | |||
201 | /* Generate random bytes */ | ||
202 | append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); | ||
203 | |||
204 | /* Store bytes */ | ||
205 | append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE); | ||
206 | |||
207 | ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
208 | DMA_TO_DEVICE); | ||
209 | #ifdef DEBUG | ||
210 | print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | ||
211 | desc, desc_bytes(desc), 1); | ||
212 | #endif | ||
213 | } | ||
214 | |||
215 | static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) | ||
216 | { | ||
217 | struct device *jrdev = ctx->jrdev; | ||
218 | struct buf_data *bd = &ctx->bufs[buf_id]; | ||
219 | u32 *desc = bd->hw_desc; | ||
220 | int sh_len = desc_len(ctx->sh_desc); | ||
221 | |||
222 | init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER | | ||
223 | HDR_REVERSE); | ||
224 | |||
225 | bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE); | ||
226 | |||
227 | append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); | ||
228 | #ifdef DEBUG | ||
229 | print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | ||
230 | desc, desc_bytes(desc), 1); | ||
231 | #endif | ||
232 | } | ||
233 | |||
234 | static void caam_cleanup(struct hwrng *rng) | ||
235 | { | ||
236 | int i; | ||
237 | struct buf_data *bd; | ||
238 | |||
239 | for (i = 0; i < 2; i++) { | ||
240 | bd = &rng_ctx.bufs[i]; | ||
241 | if (atomic_read(&bd->empty) == BUF_PENDING) | ||
242 | wait_for_completion(&bd->filled); | ||
243 | } | ||
244 | |||
245 | rng_unmap_ctx(&rng_ctx); | ||
246 | } | ||
247 | |||
248 | static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id) | ||
249 | { | ||
250 | struct buf_data *bd = &ctx->bufs[buf_id]; | ||
251 | |||
252 | rng_create_job_desc(ctx, buf_id); | ||
253 | atomic_set(&bd->empty, BUF_EMPTY); | ||
254 | submit_job(ctx, buf_id == ctx->current_buf); | ||
255 | wait_for_completion(&bd->filled); | ||
256 | } | ||
257 | |||
258 | static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) | ||
259 | { | ||
260 | ctx->jrdev = jrdev; | ||
261 | rng_create_sh_desc(ctx); | ||
262 | ctx->current_buf = 0; | ||
263 | ctx->cur_buf_idx = 0; | ||
264 | caam_init_buf(ctx, 0); | ||
265 | caam_init_buf(ctx, 1); | ||
266 | } | ||
267 | |||
268 | static struct hwrng caam_rng = { | ||
269 | .name = "rng-caam", | ||
270 | .cleanup = caam_cleanup, | ||
271 | .read = caam_read, | ||
272 | }; | ||
273 | |||
274 | static void __exit caam_rng_exit(void) | ||
275 | { | ||
276 | hwrng_unregister(&caam_rng); | ||
277 | } | ||
278 | |||
279 | static int __init caam_rng_init(void) | ||
280 | { | ||
281 | struct device_node *dev_node; | ||
282 | struct platform_device *pdev; | ||
283 | struct device *ctrldev; | ||
284 | struct caam_drv_private *priv; | ||
285 | |||
286 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
287 | if (!dev_node) | ||
288 | return -ENODEV; | ||
289 | |||
290 | pdev = of_find_device_by_node(dev_node); | ||
291 | if (!pdev) | ||
292 | return -ENODEV; | ||
293 | |||
294 | ctrldev = &pdev->dev; | ||
295 | priv = dev_get_drvdata(ctrldev); | ||
296 | of_node_put(dev_node); | ||
297 | |||
298 | caam_init_rng(&rng_ctx, priv->jrdev[0]); | ||
299 | |||
300 | dev_info(priv->jrdev[0], "registering rng-caam\n"); | ||
301 | return hwrng_register(&caam_rng); | ||
302 | } | ||
303 | |||
304 | module_init(caam_rng_init); | ||
305 | module_exit(caam_rng_exit); | ||
306 | |||
307 | MODULE_LICENSE("GPL"); | ||
308 | MODULE_DESCRIPTION("FSL CAAM support for hw_random API"); | ||
309 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); | ||
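
Once hwrng_register() succeeds, the hw_random core exposes "rng-caam" through its character device, and reads ultimately land in caam_read() above. A minimal user-space sketch (not part of the patch) of exercising the registered source via /dev/hwrng:

/* Minimal user-space consumer of the hw_random core's char device. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];
	ssize_t n, i;
	int fd = open("/dev/hwrng", O_RDONLY);	/* fed by the current rng */

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));		/* may block until a buffer fills */
	if (n > 0) {
		for (i = 0; i < n; i++)
			printf("%02x", buf[i]);
		printf("\n");
	}
	close(fd);
	return n > 0 ? 0 : 1;
}
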
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index a63bc65fae86..762aeff626ac 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/device.h> | 11 | #include <linux/device.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/crypto.h> | 13 | #include <linux/crypto.h> |
14 | #include <linux/hash.h> | ||
14 | #include <linux/hw_random.h> | 15 | #include <linux/hw_random.h> |
15 | #include <linux/of_platform.h> | 16 | #include <linux/of_platform.h> |
16 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
@@ -33,5 +34,6 @@ | |||
33 | #include <crypto/authenc.h> | 34 | #include <crypto/authenc.h> |
34 | #include <crypto/scatterwalk.h> | 35 | #include <crypto/scatterwalk.h> |
35 | #include <crypto/internal/skcipher.h> | 36 | #include <crypto/internal/skcipher.h> |
37 | #include <crypto/internal/hash.h> | ||
36 | 38 | ||
37 | #endif /* !defined(CAAM_COMPAT_H) */ | 39 | #endif /* !defined(CAAM_COMPAT_H) */ |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 77557ebcd337..414ba20c05a1 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -2,13 +2,16 @@ | |||
2 | * CAAM control-plane driver backend | 2 | * CAAM control-plane driver backend |
3 | * Controller-level driver, kernel property detection, initialization | 3 | * Controller-level driver, kernel property detection, initialization |
4 | * | 4 | * |
5 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "compat.h" | 8 | #include "compat.h" |
9 | #include "regs.h" | 9 | #include "regs.h" |
10 | #include "intern.h" | 10 | #include "intern.h" |
11 | #include "jr.h" | 11 | #include "jr.h" |
12 | #include "desc_constr.h" | ||
13 | #include "error.h" | ||
14 | #include "ctrl.h" | ||
12 | 15 | ||
13 | static int caam_remove(struct platform_device *pdev) | 16 | static int caam_remove(struct platform_device *pdev) |
14 | { | 17 | { |
@@ -43,10 +46,154 @@ static int caam_remove(struct platform_device *pdev) | |||
43 | return ret; | 46 | return ret; |
44 | } | 47 | } |
45 | 48 | ||
49 | /* | ||
50 | * Descriptor to instantiate RNG State Handle 0 in normal mode and | ||
51 | * load the JDKEK, TDKEK and TDSK registers | ||
52 | */ | ||
53 | static void build_instantiation_desc(u32 *desc) | ||
54 | { | ||
55 | u32 *jump_cmd; | ||
56 | |||
57 | init_job_desc(desc, 0); | ||
58 | |||
59 | /* INIT RNG in non-test mode */ | ||
60 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | ||
61 | OP_ALG_AS_INIT); | ||
62 | |||
63 | /* wait for done */ | ||
64 | jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); | ||
65 | set_jump_tgt_here(desc, jump_cmd); | ||
66 | |||
67 | /* | ||
68 | * load 1 to clear written reg: | ||
69 | * resets the done interrupt and returns the RNG to idle. | ||
70 | */ | ||
71 | append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); | ||
72 | |||
73 | /* generate secure keys (non-test) */ | ||
74 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | ||
75 | OP_ALG_RNG4_SK); | ||
76 | } | ||
77 | |||
78 | struct instantiate_result { | ||
79 | struct completion completion; | ||
80 | int err; | ||
81 | }; | ||
82 | |||
83 | static void rng4_init_done(struct device *dev, u32 *desc, u32 err, | ||
84 | void *context) | ||
85 | { | ||
86 | struct instantiate_result *instantiation = context; | ||
87 | |||
88 | if (err) { | ||
89 | char tmp[CAAM_ERROR_STR_MAX]; | ||
90 | |||
91 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
92 | } | ||
93 | |||
94 | instantiation->err = err; | ||
95 | complete(&instantiation->completion); | ||
96 | } | ||
97 | |||
98 | static int instantiate_rng(struct device *jrdev) | ||
99 | { | ||
100 | struct instantiate_result instantiation; | ||
101 | |||
102 | dma_addr_t desc_dma; | ||
103 | u32 *desc; | ||
104 | int ret; | ||
105 | |||
106 | desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); | ||
107 | if (!desc) { | ||
108 | dev_err(jrdev, "cannot allocate RNG init descriptor memory\n"); | ||
109 | return -ENOMEM; | ||
110 | } | ||
111 | |||
112 | build_instantiation_desc(desc); | ||
113 | desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); | ||
114 | init_completion(&instantiation.completion); | ||
115 | ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation); | ||
116 | if (!ret) { | ||
117 | wait_for_completion_interruptible(&instantiation.completion); | ||
118 | ret = instantiation.err; | ||
119 | if (ret) | ||
120 | dev_err(jrdev, "unable to instantiate RNG\n"); | ||
121 | } | ||
122 | |||
123 | dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE); | ||
124 | |||
125 | kfree(desc); | ||
126 | |||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * By default, the TRNG runs for 200 clocks per sample; | ||
132 | * 800 clocks per sample generates better entropy. | ||
133 | */ | ||
134 | static void kick_trng(struct platform_device *pdev) | ||
135 | { | ||
136 | struct device *ctrldev = &pdev->dev; | ||
137 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | ||
138 | struct caam_full __iomem *topregs; | ||
139 | struct rng4tst __iomem *r4tst; | ||
140 | u32 val; | ||
141 | |||
142 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
143 | r4tst = &topregs->ctrl.r4tst[0]; | ||
144 | |||
145 | /* put RNG4 into program mode */ | ||
146 | setbits32(&r4tst->rtmctl, RTMCTL_PRGM); | ||
147 | /* 800 clocks per sample */ | ||
148 | val = rd_reg32(&r4tst->rtsdctl); | ||
149 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | (800 << RTSDCTL_ENT_DLY_SHIFT); | ||
150 | wr_reg32(&r4tst->rtsdctl, val); | ||
151 | /* min. freq. count */ | ||
152 | wr_reg32(&r4tst->rtfrqmin, 400); | ||
153 | /* max. freq. count */ | ||
154 | wr_reg32(&r4tst->rtfrqmax, 6400); | ||
155 | /* put RNG4 into run mode */ | ||
156 | clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * caam_get_era() - Return the ERA of the SEC on SoC, based | ||
161 | * on the SEC_VID register. | ||
162 | * @caam_id: the value of the SEC_VID register | ||
163 | * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown. | ||
164 | */ | ||
165 | int caam_get_era(u64 caam_id) | ||
166 | { | ||
167 | struct sec_vid *sec_vid = (struct sec_vid *)&caam_id; | ||
168 | static const struct { | ||
169 | u16 ip_id; | ||
170 | u8 maj_rev; | ||
171 | u8 era; | ||
172 | } caam_eras[] = { | ||
173 | {0x0A10, 1, 1}, | ||
174 | {0x0A10, 2, 2}, | ||
175 | {0x0A12, 1, 3}, | ||
176 | {0x0A14, 1, 3}, | ||
177 | {0x0A14, 2, 4}, | ||
178 | {0x0A16, 1, 4}, | ||
179 | {0x0A11, 1, 4} | ||
180 | }; | ||
181 | int i; | ||
182 | |||
183 | for (i = 0; i < ARRAY_SIZE(caam_eras); i++) | ||
184 | if (caam_eras[i].ip_id == sec_vid->ip_id && | ||
185 | caam_eras[i].maj_rev == sec_vid->maj_rev) | ||
186 | return caam_eras[i].era; | ||
187 | |||
188 | return -ENOTSUPP; | ||
189 | } | ||
190 | EXPORT_SYMBOL(caam_get_era); | ||
191 | |||
46 | /* Probe routine for CAAM top (controller) level */ | 192 | /* Probe routine for CAAM top (controller) level */ |
47 | static int caam_probe(struct platform_device *pdev) | 193 | static int caam_probe(struct platform_device *pdev) |
48 | { | 194 | { |
49 | int ring, rspec; | 195 | int ret, ring, rspec; |
196 | u64 caam_id; | ||
50 | struct device *dev; | 197 | struct device *dev; |
51 | struct device_node *nprop, *np; | 198 | struct device_node *nprop, *np; |
52 | struct caam_ctrl __iomem *ctrl; | 199 | struct caam_ctrl __iomem *ctrl; |
@@ -82,13 +229,18 @@ static int caam_probe(struct platform_device *pdev) | |||
82 | 229 | ||
83 | /* | 230 | /* |
84 | * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, | 231 | * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, |
85 | * 36-bit pointers in master configuration register | 232 | * long pointers in master configuration register |
86 | */ | 233 | */ |
87 | setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | | 234 | setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | |
88 | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); | 235 | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); |
89 | 236 | ||
90 | if (sizeof(dma_addr_t) == sizeof(u64)) | 237 | if (sizeof(dma_addr_t) == sizeof(u64)) |
91 | dma_set_mask(dev, DMA_BIT_MASK(36)); | 238 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) |
239 | dma_set_mask(dev, DMA_BIT_MASK(40)); | ||
240 | else | ||
241 | dma_set_mask(dev, DMA_BIT_MASK(36)); | ||
242 | else | ||
243 | dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
92 | 244 | ||
93 | /* | 245 | /* |
94 | * Detect and enable JobRs | 246 | * Detect and enable JobRs |
@@ -141,14 +293,29 @@ static int caam_probe(struct platform_device *pdev) | |||
141 | return -ENOMEM; | 293 | return -ENOMEM; |
142 | } | 294 | } |
143 | 295 | ||
296 | /* | ||
297 | * RNG4 based SECs (v5+) need special initialization prior | ||
298 | * to executing any descriptors | ||
299 | */ | ||
300 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) { | ||
301 | kick_trng(pdev); | ||
302 | ret = instantiate_rng(ctrlpriv->jrdev[0]); | ||
303 | if (ret) { | ||
304 | caam_remove(pdev); | ||
305 | return ret; | ||
306 | } | ||
307 | } | ||
308 | |||
144 | /* NOTE: RTIC detection ought to go here, around Si time */ | 309 | /* NOTE: RTIC detection ought to go here, around Si time */ |
145 | 310 | ||
146 | /* Initialize queue allocator lock */ | 311 | /* Initialize queue allocator lock */ |
147 | spin_lock_init(&ctrlpriv->jr_alloc_lock); | 312 | spin_lock_init(&ctrlpriv->jr_alloc_lock); |
148 | 313 | ||
314 | caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id); | ||
315 | |||
149 | /* Report "alive" for developer to see */ | 316 | /* Report "alive" for developer to see */ |
150 | dev_info(dev, "device ID = 0x%016llx\n", | 317 | dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, |
151 | rd_reg64(&topregs->ctrl.perfmon.caam_id)); | 318 | caam_get_era(caam_id)); |
152 | dev_info(dev, "job rings = %d, qi = %d\n", | 319 | dev_info(dev, "job rings = %d, qi = %d\n", |
153 | ctrlpriv->total_jobrs, ctrlpriv->qi_present); | 320 | ctrlpriv->total_jobrs, ctrlpriv->qi_present); |
154 | 321 | ||
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h new file mode 100644 index 000000000000..980d44eaaf40 --- /dev/null +++ b/drivers/crypto/caam/ctrl.h | |||
@@ -0,0 +1,13 @@ | |||
1 | /* | ||
2 | * CAAM control-plane driver backend public-level include definitions | ||
3 | * | ||
4 | * Copyright 2012 Freescale Semiconductor, Inc. | ||
5 | */ | ||
6 | |||
7 | #ifndef CTRL_H | ||
8 | #define CTRL_H | ||
9 | |||
10 | /* Prototypes for backend-level services exposed to APIs */ | ||
11 | int caam_get_era(u64 caam_id); | ||
12 | |||
13 | #endif /* CTRL_H */ | ||
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index a17c2958dab1..f7f833be8c67 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
@@ -8,6 +8,16 @@ | |||
8 | #ifndef DESC_H | 8 | #ifndef DESC_H |
9 | #define DESC_H | 9 | #define DESC_H |
10 | 10 | ||
11 | struct sec4_sg_entry { | ||
12 | u64 ptr; | ||
13 | #define SEC4_SG_LEN_FIN 0x40000000 | ||
14 | #define SEC4_SG_LEN_EXT 0x80000000 | ||
15 | u32 len; | ||
16 | u8 reserved; | ||
17 | u8 buf_pool_id; | ||
18 | u16 offset; | ||
19 | }; | ||
20 | |||
11 | /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ | 21 | /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ |
12 | #define MAX_CAAM_DESCSIZE 64 | 22 | #define MAX_CAAM_DESCSIZE 64 |
13 | 23 | ||
@@ -1162,6 +1172,11 @@ | |||
1162 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) | 1172 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) |
1163 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) | 1173 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) |
1164 | 1174 | ||
1175 | /* RNG4 set */ | ||
1176 | #define OP_ALG_RNG4_SHIFT 4 | ||
1177 | #define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT) | ||
1178 | |||
1179 | #define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT) | ||
1165 | 1180 | ||
1166 | #define OP_ALG_AS_SHIFT 2 | 1181 | #define OP_ALG_AS_SHIFT 2 |
1167 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) | 1182 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) |
@@ -1585,20 +1600,4 @@ | |||
1585 | #define NFIFOENTRY_PLEN_SHIFT 0 | 1600 | #define NFIFOENTRY_PLEN_SHIFT 0 |
1586 | #define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT) | 1601 | #define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT) |
1587 | 1602 | ||
1588 | /* | ||
1589 | * PDB internal definitions | ||
1590 | */ | ||
1591 | |||
1592 | /* IPSec ESP CBC Encap/Decap Options */ | ||
1593 | #define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */ | ||
1594 | #define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */ | ||
1595 | #define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */ | ||
1596 | #define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */ | ||
1597 | #define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */ | ||
1598 | #define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */ | ||
1599 | #define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */ | ||
1600 | #define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */ | ||
1601 | #define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */ | ||
1602 | #define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */ | ||
1603 | |||
1604 | #endif /* DESC_H */ | 1603 | #endif /* DESC_H */ |
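
The new struct sec4_sg_entry describes one element of a SEC4 hardware scatter/gather table, with SEC4_SG_LEN_FIN marking the final entry's length word. A minimal sketch of filling such a table from fragments that are already DMA-mapped; the helper is hypothetical and ignores any byte-order conversion the hardware may require:

/*
 * Sketch only: populate a SEC4 hardware S/G table, terminating it by
 * setting SEC4_SG_LEN_FIN in the last entry's length word, per the
 * struct sec4_sg_entry layout above.
 */
static void sec4_sg_fill(struct sec4_sg_entry *sg, dma_addr_t *frag_dma,
			 u32 *frag_len, int nents)
{
	int i;

	for (i = 0; i < nents; i++) {
		sg[i].ptr = frag_dma[i];
		sg[i].len = frag_len[i];
		sg[i].reserved = 0;
		sg[i].buf_pool_id = 0;
		sg[i].offset = 0;
	}
	sg[nents - 1].len |= SEC4_SG_LEN_FIN;	/* terminate the table */
}
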
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 348b882275f0..c85c1f058401 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * caam descriptor construction helper functions | 2 | * caam descriptor construction helper functions |
3 | * | 3 | * |
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 4 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "desc.h" | 7 | #include "desc.h" |
@@ -51,7 +51,7 @@ static inline void *sh_desc_pdb(u32 *desc) | |||
51 | 51 | ||
52 | static inline void init_desc(u32 *desc, u32 options) | 52 | static inline void init_desc(u32 *desc, u32 options) |
53 | { | 53 | { |
54 | *desc = options | HDR_ONE | 1; | 54 | *desc = (options | HDR_ONE) + 1; |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline void init_sh_desc(u32 *desc, u32 options) | 57 | static inline void init_sh_desc(u32 *desc, u32 options) |
@@ -62,9 +62,9 @@ static inline void init_sh_desc(u32 *desc, u32 options) | |||
62 | 62 | ||
63 | static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) | 63 | static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) |
64 | { | 64 | { |
65 | u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1; | 65 | u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; |
66 | 66 | ||
67 | init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) | | 67 | init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) | |
68 | options); | 68 | options); |
69 | } | 69 | } |
70 | 70 | ||
@@ -117,6 +117,15 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, | |||
117 | append_ptr(desc, ptr); | 117 | append_ptr(desc, ptr); |
118 | } | 118 | } |
119 | 119 | ||
120 | /* Write length after pointer, rather than inside command */ | ||
121 | static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr, | ||
122 | unsigned int len, u32 command) | ||
123 | { | ||
124 | append_cmd(desc, command); | ||
125 | append_ptr(desc, ptr); | ||
126 | append_cmd(desc, len); | ||
127 | } | ||
128 | |||
120 | static inline void append_cmd_data(u32 *desc, void *data, int len, | 129 | static inline void append_cmd_data(u32 *desc, void *data, int len, |
121 | u32 command) | 130 | u32 command) |
122 | { | 131 | { |
@@ -166,13 +175,22 @@ static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \ | |||
166 | append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ | 175 | append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ |
167 | } | 176 | } |
168 | APPEND_CMD_PTR(key, KEY) | 177 | APPEND_CMD_PTR(key, KEY) |
169 | APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR) | ||
170 | APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR) | ||
171 | APPEND_CMD_PTR(load, LOAD) | 178 | APPEND_CMD_PTR(load, LOAD) |
172 | APPEND_CMD_PTR(store, STORE) | 179 | APPEND_CMD_PTR(store, STORE) |
173 | APPEND_CMD_PTR(fifo_load, FIFO_LOAD) | 180 | APPEND_CMD_PTR(fifo_load, FIFO_LOAD) |
174 | APPEND_CMD_PTR(fifo_store, FIFO_STORE) | 181 | APPEND_CMD_PTR(fifo_store, FIFO_STORE) |
175 | 182 | ||
183 | #define APPEND_SEQ_PTR_INTLEN(cmd, op) \ | ||
184 | static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \ | ||
185 | unsigned int len, \ | ||
186 | u32 options) \ | ||
187 | { \ | ||
188 | PRINT_POS; \ | ||
189 | append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \ | ||
190 | } | ||
191 | APPEND_SEQ_PTR_INTLEN(in, IN) | ||
192 | APPEND_SEQ_PTR_INTLEN(out, OUT) | ||
193 | |||
176 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ | 194 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ |
177 | static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | 195 | static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ |
178 | unsigned int len, u32 options) \ | 196 | unsigned int len, u32 options) \ |
@@ -183,6 +201,33 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | |||
183 | APPEND_CMD_PTR_TO_IMM(load, LOAD); | 201 | APPEND_CMD_PTR_TO_IMM(load, LOAD); |
184 | APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); | 202 | APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); |
185 | 203 | ||
204 | #define APPEND_CMD_PTR_EXTLEN(cmd, op) \ | ||
205 | static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \ | ||
206 | unsigned int len, u32 options) \ | ||
207 | { \ | ||
208 | PRINT_POS; \ | ||
209 | append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \ | ||
210 | } | ||
211 | APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR) | ||
212 | APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR) | ||
213 | |||
214 | /* | ||
215 | * Determine whether to store length internally or externally depending on | ||
216 | * the size of its type | ||
217 | */ | ||
218 | #define APPEND_CMD_PTR_LEN(cmd, op, type) \ | ||
219 | static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \ | ||
220 | type len, u32 options) \ | ||
221 | { \ | ||
222 | PRINT_POS; \ | ||
223 | if (sizeof(type) > sizeof(u16)) \ | ||
224 | append_##cmd##_extlen(desc, ptr, len, options); \ | ||
225 | else \ | ||
226 | append_##cmd##_intlen(desc, ptr, len, options); \ | ||
227 | } | ||
228 | APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32) | ||
229 | APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32) | ||
230 | |||
186 | /* | 231 | /* |
187 | * 2nd variant for commands whose specified immediate length differs | 232 | * 2nd variant for commands whose specified immediate length differs |
188 | * from length of immediate data provided, e.g., split keys | 233 | * from length of immediate data provided, e.g., split keys |
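
The APPEND_CMD_PTR_LEN wrapper picks the internal or external length form from the C type of len at compile time; since u32 is wider than u16, the generated append_seq_in_ptr()/append_seq_out_ptr() always take the extlen path. A sketch of what the SEQ OUT PTR instantiation expands to (PRINT_POS omitted):

/*
 * What APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32) expands to
 * (PRINT_POS omitted). sizeof(u32) > sizeof(u16), so the compiler
 * resolves the branch and only the extlen call survives.
 */
static inline void append_seq_out_ptr(u32 *desc, dma_addr_t ptr,
				      u32 len, u32 options)
{
	if (sizeof(u32) > sizeof(u16))		/* always true */
		append_seq_out_ptr_extlen(desc, ptr, len, options);
	else
		append_seq_out_ptr_intlen(desc, ptr, len, options);
}
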
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 7e2d54bffad6..9955ed9643e6 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
@@ -39,18 +39,20 @@ static void report_ccb_status(u32 status, char *outstr) | |||
39 | char *cha_id_list[] = { | 39 | char *cha_id_list[] = { |
40 | "", | 40 | "", |
41 | "AES", | 41 | "AES", |
42 | "DES, 3DES", | 42 | "DES", |
43 | "ARC4", | 43 | "ARC4", |
44 | "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512", | 44 | "MDHA", |
45 | "RNG", | 45 | "RNG", |
46 | "SNOW f8", | 46 | "SNOW f8", |
47 | "Kasumi f8, f9", | 47 | "Kasumi f8/9", |
48 | "All Public Key Algorithms", | 48 | "PKHA", |
49 | "CRC", | 49 | "CRCA", |
50 | "SNOW f9", | 50 | "SNOW f9", |
51 | "ZUCE", | ||
52 | "ZUCA", | ||
51 | }; | 53 | }; |
52 | char *err_id_list[] = { | 54 | char *err_id_list[] = { |
53 | "None. No error.", | 55 | "No error.", |
54 | "Mode error.", | 56 | "Mode error.", |
55 | "Data size error.", | 57 | "Data size error.", |
56 | "Key size error.", | 58 | "Key size error.", |
@@ -67,6 +69,20 @@ static void report_ccb_status(u32 status, char *outstr) | |||
67 | "Invalid CHA combination was selected", | 69 | "Invalid CHA combination was selected", |
68 | "Invalid CHA selected.", | 70 | "Invalid CHA selected.", |
69 | }; | 71 | }; |
72 | char *rng_err_id_list[] = { | ||
73 | "", | ||
74 | "", | ||
75 | "", | ||
76 | "Instantiate", | ||
77 | "Not instantiated", | ||
78 | "Test instantiate", | ||
79 | "Prediction resistance", | ||
80 | "", | ||
81 | "Prediction resistance and test request", | ||
82 | "Uninstantiate", | ||
83 | "", | ||
84 | "Secure key generation", | ||
85 | }; | ||
70 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> | 86 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> |
71 | JRSTA_CCBERR_CHAID_SHIFT; | 87 | JRSTA_CCBERR_CHAID_SHIFT; |
72 | u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; | 88 | u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; |
@@ -81,7 +97,13 @@ static void report_ccb_status(u32 status, char *outstr) | |||
81 | cha_id, sizeof("ff")); | 97 | cha_id, sizeof("ff")); |
82 | } | 98 | } |
83 | 99 | ||
84 | if (err_id < ARRAY_SIZE(err_id_list)) { | 100 | if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG && |
101 | err_id < ARRAY_SIZE(rng_err_id_list) && | ||
102 | strlen(rng_err_id_list[err_id])) { | ||
103 | /* RNG-only error */ | ||
104 | SPRINTFCAT(outstr, "%s", rng_err_id_list[err_id], | ||
105 | strlen(rng_err_id_list[err_id])); | ||
106 | } else if (err_id < ARRAY_SIZE(err_id_list)) { | ||
85 | SPRINTFCAT(outstr, "%s", err_id_list[err_id], | 107 | SPRINTFCAT(outstr, "%s", err_id_list[err_id], |
86 | strlen(err_id_list[err_id])); | 108 | strlen(err_id_list[err_id])); |
87 | } else { | 109 | } else { |
@@ -101,10 +123,10 @@ static void report_deco_status(u32 status, char *outstr) | |||
101 | u8 value; | 123 | u8 value; |
102 | char *error_text; | 124 | char *error_text; |
103 | } desc_error_list[] = { | 125 | } desc_error_list[] = { |
104 | { 0x00, "None. No error." }, | 126 | { 0x00, "No error." }, |
105 | { 0x01, "SGT Length Error. The descriptor is trying to read " | 127 | { 0x01, "SGT Length Error. The descriptor is trying to read " |
106 | "more data than is contained in the SGT table." }, | 128 | "more data than is contained in the SGT table." }, |
107 | { 0x02, "Reserved." }, | 129 | { 0x02, "SGT Null Entry Error." }, |
108 | { 0x03, "Job Ring Control Error. There is a bad value in the " | 130 | { 0x03, "Job Ring Control Error. There is a bad value in the " |
109 | "Job Ring Control register." }, | 131 | "Job Ring Control register." }, |
110 | { 0x04, "Invalid Descriptor Command. The Descriptor Command " | 132 | { 0x04, "Invalid Descriptor Command. The Descriptor Command " |
@@ -116,7 +138,7 @@ static void report_deco_status(u32 status, char *outstr) | |||
116 | { 0x09, "Invalid OPERATION Command" }, | 138 | { 0x09, "Invalid OPERATION Command" }, |
117 | { 0x0A, "Invalid FIFO LOAD Command" }, | 139 | { 0x0A, "Invalid FIFO LOAD Command" }, |
118 | { 0x0B, "Invalid FIFO STORE Command" }, | 140 | { 0x0B, "Invalid FIFO STORE Command" }, |
119 | { 0x0C, "Invalid MOVE Command" }, | 141 | { 0x0C, "Invalid MOVE/MOVE_LEN Command" }, |
120 | { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is " | 142 | { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is " |
121 | "invalid because the target is not a Job Header " | 143 | "invalid because the target is not a Job Header " |
122 | "Command, or the jump is from a Trusted Descriptor to " | 144 | "Command, or the jump is from a Trusted Descriptor to " |
@@ -166,6 +188,8 @@ static void report_deco_status(u32 status, char *outstr) | |||
166 | "(input frame; block ciphers) and IPsec decap (output " | 188 | "(input frame; block ciphers) and IPsec decap (output " |
167 | "frame, when doing the next header byte update) and " | 189 | "frame, when doing the next header byte update) and " |
168 | "DCRC (output frame)." }, | 190 | "DCRC (output frame)." }, |
191 | { 0x23, "Read Input Frame error" }, | ||
192 | { 0x24, "JDKEK, TDKEK or TDSK not loaded error" }, | ||
169 | { 0x80, "DNR (do not run) error" }, | 193 | { 0x80, "DNR (do not run) error" }, |
170 | { 0x81, "undefined protocol command" }, | 194 | { 0x81, "undefined protocol command" }, |
171 | { 0x82, "invalid setting in PDB" }, | 195 | { 0x82, "invalid setting in PDB" }, |
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index a34be01b0b29..5cd4c1b268a1 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
@@ -43,7 +43,7 @@ struct caam_drv_private_jr { | |||
43 | struct device *parentdev; /* points back to controller dev */ | 43 | struct device *parentdev; /* points back to controller dev */ |
44 | int ridx; | 44 | int ridx; |
45 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | 45 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
46 | struct tasklet_struct irqtask[NR_CPUS]; | 46 | struct tasklet_struct irqtask; |
47 | int irq; /* One per queue */ | 47 | int irq; /* One per queue */ |
48 | int assign; /* busy/free */ | 48 | int assign; /* busy/free */ |
49 | 49 | ||
@@ -86,10 +86,10 @@ struct caam_drv_private { | |||
86 | 86 | ||
87 | /* which jr allocated to scatterlist crypto */ | 87 | /* which jr allocated to scatterlist crypto */ |
88 | atomic_t tfm_count ____cacheline_aligned; | 88 | atomic_t tfm_count ____cacheline_aligned; |
89 | int num_jrs_for_algapi; | ||
90 | struct device **algapi_jr; | ||
91 | /* list of registered crypto algorithms (mk generic context handle?) */ | 89 | /* list of registered crypto algorithms (mk generic context handle?) */ |
92 | struct list_head alg_list; | 90 | struct list_head alg_list; |
91 | /* list of registered hash algorithms (mk generic context handle?) */ | ||
92 | struct list_head hash_list; | ||
93 | 93 | ||
94 | /* | 94 | /* |
95 | * debugfs entries for developer view into driver/device | 95 | * debugfs entries for developer view into driver/device |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 340fa322c0f0..53c8c51d5881 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * CAAM/SEC 4.x transport/backend driver | 2 | * CAAM/SEC 4.x transport/backend driver |
3 | * JobR backend functionality | 3 | * JobR backend functionality |
4 | * | 4 | * |
5 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "compat.h" | 8 | #include "compat.h" |
@@ -43,7 +43,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | |||
43 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); | 43 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); |
44 | 44 | ||
45 | preempt_disable(); | 45 | preempt_disable(); |
46 | tasklet_schedule(&jrp->irqtask[smp_processor_id()]); | 46 | tasklet_schedule(&jrp->irqtask); |
47 | preempt_enable(); | 47 | preempt_enable(); |
48 | 48 | ||
49 | return IRQ_HANDLED; | 49 | return IRQ_HANDLED; |
@@ -58,17 +58,16 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
58 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); | 58 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); |
59 | u32 *userdesc, userstatus; | 59 | u32 *userdesc, userstatus; |
60 | void *userarg; | 60 | void *userarg; |
61 | unsigned long flags; | ||
62 | 61 | ||
63 | spin_lock_irqsave(&jrp->outlock, flags); | 62 | while (rd_reg32(&jrp->rregs->outring_used)) { |
64 | 63 | ||
65 | head = ACCESS_ONCE(jrp->head); | 64 | head = ACCESS_ONCE(jrp->head); |
66 | sw_idx = tail = jrp->tail; | ||
67 | 65 | ||
68 | while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && | 66 | spin_lock_bh(&jrp->outlock); |
69 | rd_reg32(&jrp->rregs->outring_used)) { | ||
70 | 67 | ||
68 | sw_idx = tail = jrp->tail; | ||
71 | hw_idx = jrp->out_ring_read_index; | 69 | hw_idx = jrp->out_ring_read_index; |
70 | |||
72 | for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { | 71 | for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { |
73 | sw_idx = (tail + i) & (JOBR_DEPTH - 1); | 72 | sw_idx = (tail + i) & (JOBR_DEPTH - 1); |
74 | 73 | ||
@@ -95,7 +94,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
95 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; | 94 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; |
96 | userstatus = jrp->outring[hw_idx].jrstatus; | 95 | userstatus = jrp->outring[hw_idx].jrstatus; |
97 | 96 | ||
98 | smp_mb(); | 97 | /* set done */ |
98 | wr_reg32(&jrp->rregs->outring_rmvd, 1); | ||
99 | 99 | ||
100 | jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & | 100 | jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & |
101 | (JOBR_DEPTH - 1); | 101 | (JOBR_DEPTH - 1); |
@@ -115,22 +115,12 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
115 | jrp->tail = tail; | 115 | jrp->tail = tail; |
116 | } | 116 | } |
117 | 117 | ||
118 | /* set done */ | 118 | spin_unlock_bh(&jrp->outlock); |
119 | wr_reg32(&jrp->rregs->outring_rmvd, 1); | ||
120 | |||
121 | spin_unlock_irqrestore(&jrp->outlock, flags); | ||
122 | 119 | ||
123 | /* Finally, execute user's callback */ | 120 | /* Finally, execute user's callback */ |
124 | usercall(dev, userdesc, userstatus, userarg); | 121 | usercall(dev, userdesc, userstatus, userarg); |
125 | |||
126 | spin_lock_irqsave(&jrp->outlock, flags); | ||
127 | |||
128 | head = ACCESS_ONCE(jrp->head); | ||
129 | sw_idx = tail = jrp->tail; | ||
130 | } | 122 | } |
131 | 123 | ||
132 | spin_unlock_irqrestore(&jrp->outlock, flags); | ||
133 | |||
134 | /* reenable / unmask IRQs */ | 124 | /* reenable / unmask IRQs */ |
135 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | 125 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); |
136 | } | 126 | } |
@@ -148,23 +138,22 @@ int caam_jr_register(struct device *ctrldev, struct device **rdev) | |||
148 | { | 138 | { |
149 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | 139 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
150 | struct caam_drv_private_jr *jrpriv = NULL; | 140 | struct caam_drv_private_jr *jrpriv = NULL; |
151 | unsigned long flags; | ||
152 | int ring; | 141 | int ring; |
153 | 142 | ||
154 | /* Lock, if free ring - assign, unlock */ | 143 | /* Lock, if free ring - assign, unlock */ |
155 | spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags); | 144 | spin_lock(&ctrlpriv->jr_alloc_lock); |
156 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | 145 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { |
157 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); | 146 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); |
158 | if (jrpriv->assign == JOBR_UNASSIGNED) { | 147 | if (jrpriv->assign == JOBR_UNASSIGNED) { |
159 | jrpriv->assign = JOBR_ASSIGNED; | 148 | jrpriv->assign = JOBR_ASSIGNED; |
160 | *rdev = ctrlpriv->jrdev[ring]; | 149 | *rdev = ctrlpriv->jrdev[ring]; |
161 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | 150 | spin_unlock(&ctrlpriv->jr_alloc_lock); |
162 | return ring; | 151 | return ring; |
163 | } | 152 | } |
164 | } | 153 | } |
165 | 154 | ||
166 | /* If assigned, write dev where caller needs it */ | 155 | /* If assigned, write dev where caller needs it */ |
167 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | 156 | spin_unlock(&ctrlpriv->jr_alloc_lock); |
168 | *rdev = NULL; | 157 | *rdev = NULL; |
169 | 158 | ||
170 | return -ENODEV; | 159 | return -ENODEV; |
@@ -182,7 +171,6 @@ int caam_jr_deregister(struct device *rdev) | |||
182 | { | 171 | { |
183 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); | 172 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); |
184 | struct caam_drv_private *ctrlpriv; | 173 | struct caam_drv_private *ctrlpriv; |
185 | unsigned long flags; | ||
186 | 174 | ||
187 | /* Get the owning controller's private space */ | 175 | /* Get the owning controller's private space */ |
188 | ctrlpriv = dev_get_drvdata(jrpriv->parentdev); | 176 | ctrlpriv = dev_get_drvdata(jrpriv->parentdev); |
@@ -195,9 +183,9 @@ int caam_jr_deregister(struct device *rdev) | |||
195 | return -EBUSY; | 183 | return -EBUSY; |
196 | 184 | ||
197 | /* Release ring */ | 185 | /* Release ring */ |
198 | spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags); | 186 | spin_lock(&ctrlpriv->jr_alloc_lock); |
199 | jrpriv->assign = JOBR_UNASSIGNED; | 187 | jrpriv->assign = JOBR_UNASSIGNED; |
200 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | 188 | spin_unlock(&ctrlpriv->jr_alloc_lock); |
201 | 189 | ||
202 | return 0; | 190 | return 0; |
203 | } | 191 | } |
@@ -238,7 +226,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
238 | { | 226 | { |
239 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | 227 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
240 | struct caam_jrentry_info *head_entry; | 228 | struct caam_jrentry_info *head_entry; |
241 | unsigned long flags; | ||
242 | int head, tail, desc_size; | 229 | int head, tail, desc_size; |
243 | dma_addr_t desc_dma; | 230 | dma_addr_t desc_dma; |
244 | 231 | ||
@@ -249,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
249 | return -EIO; | 236 | return -EIO; |
250 | } | 237 | } |
251 | 238 | ||
252 | spin_lock_irqsave(&jrp->inplock, flags); | 239 | spin_lock(&jrp->inplock); |
253 | 240 | ||
254 | head = jrp->head; | 241 | head = jrp->head; |
255 | tail = ACCESS_ONCE(jrp->tail); | 242 | tail = ACCESS_ONCE(jrp->tail); |
256 | 243 | ||
257 | if (!rd_reg32(&jrp->rregs->inpring_avail) || | 244 | if (!rd_reg32(&jrp->rregs->inpring_avail) || |
258 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { | 245 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { |
259 | spin_unlock_irqrestore(&jrp->inplock, flags); | 246 | spin_unlock(&jrp->inplock); |
260 | dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); | 247 | dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); |
261 | return -EBUSY; | 248 | return -EBUSY; |
262 | } | 249 | } |
@@ -276,11 +263,9 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
276 | (JOBR_DEPTH - 1); | 263 | (JOBR_DEPTH - 1); |
277 | jrp->head = (head + 1) & (JOBR_DEPTH - 1); | 264 | jrp->head = (head + 1) & (JOBR_DEPTH - 1); |
278 | 265 | ||
279 | wmb(); | ||
280 | |||
281 | wr_reg32(&jrp->rregs->inpring_jobadd, 1); | 266 | wr_reg32(&jrp->rregs->inpring_jobadd, 1); |
282 | 267 | ||
283 | spin_unlock_irqrestore(&jrp->inplock, flags); | 268 | spin_unlock(&jrp->inplock); |
284 | 269 | ||
285 | return 0; | 270 | return 0; |
286 | } | 271 | } |
@@ -337,11 +322,9 @@ static int caam_jr_init(struct device *dev) | |||
337 | 322 | ||
338 | jrp = dev_get_drvdata(dev); | 323 | jrp = dev_get_drvdata(dev); |
339 | 324 | ||
340 | /* Connect job ring interrupt handler. */ | 325 | tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); |
341 | for_each_possible_cpu(i) | ||
342 | tasklet_init(&jrp->irqtask[i], caam_jr_dequeue, | ||
343 | (unsigned long)dev); | ||
344 | 326 | ||
327 | /* Connect job ring interrupt handler. */ | ||
345 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, | 328 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, |
346 | "caam-jobr", dev); | 329 | "caam-jobr", dev); |
347 | if (error) { | 330 | if (error) { |
@@ -356,10 +339,11 @@ static int caam_jr_init(struct device *dev) | |||
356 | if (error) | 339 | if (error) |
357 | return error; | 340 | return error; |
358 | 341 | ||
359 | jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH, | 342 | jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, |
360 | GFP_KERNEL | GFP_DMA); | 343 | &inpbusaddr, GFP_KERNEL); |
361 | jrp->outring = kzalloc(sizeof(struct jr_outentry) * | 344 | |
362 | JOBR_DEPTH, GFP_KERNEL | GFP_DMA); | 345 | jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) * |
346 | JOBR_DEPTH, &outbusaddr, GFP_KERNEL); | ||
363 | 347 | ||
364 | jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, | 348 | jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, |
365 | GFP_KERNEL); | 349 | GFP_KERNEL); |
@@ -375,31 +359,6 @@ static int caam_jr_init(struct device *dev) | |||
375 | jrp->entinfo[i].desc_addr_dma = !0; | 359 | jrp->entinfo[i].desc_addr_dma = !0; |
376 | 360 | ||
377 | /* Setup rings */ | 361 | /* Setup rings */ |
378 | inpbusaddr = dma_map_single(dev, jrp->inpring, | ||
379 | sizeof(u32 *) * JOBR_DEPTH, | ||
380 | DMA_BIDIRECTIONAL); | ||
381 | if (dma_mapping_error(dev, inpbusaddr)) { | ||
382 | dev_err(dev, "caam_jr_init(): can't map input ring\n"); | ||
383 | kfree(jrp->inpring); | ||
384 | kfree(jrp->outring); | ||
385 | kfree(jrp->entinfo); | ||
386 | return -EIO; | ||
387 | } | ||
388 | |||
389 | outbusaddr = dma_map_single(dev, jrp->outring, | ||
390 | sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
391 | DMA_BIDIRECTIONAL); | ||
392 | if (dma_mapping_error(dev, outbusaddr)) { | ||
393 | dev_err(dev, "caam_jr_init(): can't map output ring\n"); | ||
394 | dma_unmap_single(dev, inpbusaddr, | ||
395 | sizeof(u32 *) * JOBR_DEPTH, | ||
396 | DMA_BIDIRECTIONAL); | ||
397 | kfree(jrp->inpring); | ||
398 | kfree(jrp->outring); | ||
399 | kfree(jrp->entinfo); | ||
400 | return -EIO; | ||
401 | } | ||
402 | |||
403 | jrp->inp_ring_write_index = 0; | 362 | jrp->inp_ring_write_index = 0; |
404 | jrp->out_ring_read_index = 0; | 363 | jrp->out_ring_read_index = 0; |
405 | jrp->head = 0; | 364 | jrp->head = 0; |
@@ -431,12 +390,11 @@ int caam_jr_shutdown(struct device *dev) | |||
431 | { | 390 | { |
432 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | 391 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
433 | dma_addr_t inpbusaddr, outbusaddr; | 392 | dma_addr_t inpbusaddr, outbusaddr; |
434 | int ret, i; | 393 | int ret; |
435 | 394 | ||
436 | ret = caam_reset_hw_jr(dev); | 395 | ret = caam_reset_hw_jr(dev); |
437 | 396 | ||
438 | for_each_possible_cpu(i) | 397 | tasklet_kill(&jrp->irqtask); |
439 | tasklet_kill(&jrp->irqtask[i]); | ||
440 | 398 | ||
441 | /* Release interrupt */ | 399 | /* Release interrupt */ |
442 | free_irq(jrp->irq, dev); | 400 | free_irq(jrp->irq, dev); |
@@ -444,13 +402,10 @@ int caam_jr_shutdown(struct device *dev) | |||
444 | /* Free rings */ | 402 | /* Free rings */ |
445 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); | 403 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); |
446 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); | 404 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); |
447 | dma_unmap_single(dev, outbusaddr, | 405 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, |
448 | sizeof(struct jr_outentry) * JOBR_DEPTH, | 406 | jrp->inpring, inpbusaddr); |
449 | DMA_BIDIRECTIONAL); | 407 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, |
450 | dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH, | 408 | jrp->outring, outbusaddr); |
451 | DMA_BIDIRECTIONAL); | ||
452 | kfree(jrp->outring); | ||
453 | kfree(jrp->inpring); | ||
454 | kfree(jrp->entinfo); | 409 | kfree(jrp->entinfo); |
455 | 410 | ||
456 | return ret; | 411 | return ret; |
@@ -503,6 +458,14 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | |||
503 | dev_set_drvdata(jrdev, jrpriv); | 458 | dev_set_drvdata(jrdev, jrpriv); |
504 | ctrlpriv->jrdev[ring] = jrdev; | 459 | ctrlpriv->jrdev[ring] = jrdev; |
505 | 460 | ||
461 | if (sizeof(dma_addr_t) == sizeof(u64)) | ||
462 | if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring")) | ||
463 | dma_set_mask(jrdev, DMA_BIT_MASK(40)); | ||
464 | else | ||
465 | dma_set_mask(jrdev, DMA_BIT_MASK(36)); | ||
466 | else | ||
467 | dma_set_mask(jrdev, DMA_BIT_MASK(32)); | ||
468 | |||
506 | /* Identify the interrupt */ | 469 | /* Identify the interrupt */ |
507 | jrpriv->irq = of_irq_to_resource(np, 0, NULL); | 470 | jrpriv->irq = of_irq_to_resource(np, 0, NULL); |
508 | 471 | ||
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c new file mode 100644 index 000000000000..002888185f17 --- /dev/null +++ b/drivers/crypto/caam/key_gen.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * CAAM/SEC 4.x functions for handling key-generation jobs | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | */ | ||
7 | #include "compat.h" | ||
8 | #include "jr.h" | ||
9 | #include "error.h" | ||
10 | #include "desc_constr.h" | ||
11 | #include "key_gen.h" | ||
12 | |||
13 | void split_key_done(struct device *dev, u32 *desc, u32 err, | ||
14 | void *context) | ||
15 | { | ||
16 | struct split_key_result *res = context; | ||
17 | |||
18 | #ifdef DEBUG | ||
19 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
20 | #endif | ||
21 | |||
22 | if (err) { | ||
23 | char tmp[CAAM_ERROR_STR_MAX]; | ||
24 | |||
25 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
26 | } | ||
27 | |||
28 | res->err = err; | ||
29 | |||
30 | complete(&res->completion); | ||
31 | } | ||
32 | EXPORT_SYMBOL(split_key_done); | ||
33 | /* | ||
34 | get a split ipad/opad key | ||
35 | |||
36 | Split key generation----------------------------------------------- | ||
37 | |||
38 | [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 | ||
39 | [01] 0x04000014 key: class2->keyreg len=20 | ||
40 | @0xffe01000 | ||
41 | [03] 0x84410014 operation: cls2-op sha1 hmac init dec | ||
42 | [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm | ||
43 | [05] 0xa4000001 jump: class2 local all ->1 [06] | ||
44 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | ||
45 | @0xffe04000 | ||
46 | */ | ||
47 | int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | ||
48 | int split_key_pad_len, const u8 *key_in, u32 keylen, | ||
49 | u32 alg_op) | ||
50 | { | ||
51 | u32 *desc; | ||
52 | struct split_key_result result; | ||
53 | dma_addr_t dma_addr_in, dma_addr_out; | ||
54 | int ret = 0; | ||
55 | |||
56 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | ||
57 | if (!desc) | ||
58 | return -ENOMEM; | ||
59 | |||
60 | init_job_desc(desc, 0); | ||
59 | |||
60 | dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, | ||
61 | DMA_TO_DEVICE); | ||
62 | if (dma_mapping_error(jrdev, dma_addr_in)) { | ||
63 | dev_err(jrdev, "unable to map key input memory\n"); | ||
64 | kfree(desc); | ||
65 | return -ENOMEM; | ||
66 | } | ||
67 | append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); | ||
68 | |||
69 | /* Sets MDHA up into an HMAC-INIT */ | ||
70 | append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT); | ||
71 | |||
72 | /* | ||
73 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion | ||
74 | * into both pads inside MDHA | ||
75 | */ | ||
76 | append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | | ||
77 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); | ||
78 | |||
79 | /* | ||
80 | * FIFO_STORE with the explicit split-key content store | ||
81 | * (0x26 output type) | ||
82 | */ | ||
83 | dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, | ||
84 | DMA_FROM_DEVICE); | ||
85 | if (dma_mapping_error(jrdev, dma_addr_out)) { | ||
86 | dev_err(jrdev, "unable to map key output memory\n"); | ||
87 | kfree(desc); | ||
88 | return -ENOMEM; | ||
89 | } | ||
90 | append_fifo_store(desc, dma_addr_out, split_key_len, | ||
91 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | ||
92 | |||
93 | #ifdef DEBUG | ||
94 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
95 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); | ||
96 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
97 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
98 | #endif | ||
99 | |||
100 | result.err = 0; | ||
101 | init_completion(&result.completion); | ||
102 | |||
103 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | ||
104 | if (!ret) { | ||
105 | /* in progress */ | ||
106 | wait_for_completion_interruptible(&result.completion); | ||
107 | ret = result.err; | ||
108 | #ifdef DEBUG | ||
109 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
110 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, | ||
111 | split_key_pad_len, 1); | ||
112 | #endif | ||
113 | } | ||
114 | |||
115 | dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, | ||
116 | DMA_FROM_DEVICE); | ||
117 | dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); | ||
118 | |||
119 | kfree(desc); | ||
120 | |||
121 | return ret; | ||
122 | } | ||
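
gen_split_key() runs a single blocking job that converts an HMAC key into the MDHA split (ipad/opad) key, completing via split_key_done(). A minimal caller sketch for a SHA-1 HMAC key; the OP_ALG_* bits are assumptions drawn from desc.h, and the sizing arguments follow the trace comment above (40 bytes stored into a padded output buffer):

/*
 * Sketch only: request a SHA-1 HMAC split key. The caller supplies the
 * split key length and its padded buffer length, as the driver does.
 */
static int demo_split_key(struct device *jrdev, const u8 *hmac_key,
			  u32 keylen, u8 *split_key, int split_key_len,
			  int split_key_pad_len)
{
	return gen_split_key(jrdev, split_key, split_key_len,
			     split_key_pad_len, hmac_key, keylen,
			     OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC);
}
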
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h new file mode 100644 index 000000000000..d95d290c6e8b --- /dev/null +++ b/drivers/crypto/caam/key_gen.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * CAAM/SEC 4.x definitions for handling key-generation jobs | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | struct split_key_result { | ||
9 | struct completion completion; | ||
10 | int err; | ||
11 | }; | ||
12 | |||
13 | void split_key_done(struct device *dev, u32 *desc, u32 err, void *context); | ||
14 | |||
15 | int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | ||
16 | int split_key_pad_len, const u8 *key_in, u32 keylen, | ||
17 | u32 alg_op); | ||
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h new file mode 100644 index 000000000000..62950d22ac13 --- /dev/null +++ b/drivers/crypto/caam/pdb.h | |||
@@ -0,0 +1,401 @@ | |||
1 | /* | ||
2 | * CAAM Protocol Data Block (PDB) definition header file | ||
3 | * | ||
4 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #ifndef CAAM_PDB_H | ||
9 | #define CAAM_PDB_H | ||
10 | |||
11 | /* | ||
12 | * PDB - IPSec ESP Header Modification Options | ||
13 | */ | ||
14 | #define PDBHMO_ESP_DECAP_SHIFT 12 | ||
15 | #define PDBHMO_ESP_ENCAP_SHIFT 4 | ||
16 | /* | ||
17 | * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the | ||
18 | * Options Byte IP version (IPvsn) field: | ||
19 | * if IPv4, decrement the inner IP header TTL field (byte 8); | ||
20 | * if IPv6 decrement the inner IP header Hop Limit field (byte 7). | ||
21 | */ | ||
22 | #define PDBHMO_ESP_DECAP_DEC_TTL (0x02 << PDBHMO_ESP_DECAP_SHIFT) | ||
23 | #define PDBHMO_ESP_ENCAP_DEC_TTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT) | ||
24 | /* | ||
25 | * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte | ||
26 | * from the outer IP header to the inner IP header. | ||
27 | */ | ||
28 | #define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT) | ||
29 | /* | ||
30 | * Encap - Copy DF bit - if an IPv4 tunnel mode outer IP header is coming from | ||
31 | * the PDB, copy the DF bit from the inner IP header to the outer IP header. | ||
32 | */ | ||
33 | #define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT) | ||
34 | |||
35 | /* | ||
36 | * PDB - IPSec ESP Encap/Decap Options | ||
37 | */ | ||
38 | #define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */ | ||
39 | #define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */ | ||
40 | #define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */ | ||
41 | #define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */ | ||
42 | #define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */ | ||
43 | #define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */ | ||
44 | #define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */ | ||
45 | #define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */ | ||
46 | #define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */ | ||
47 | #define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */ | ||
48 | #define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */ | ||
49 | #define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */ | ||
50 | #define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */ | ||
51 | #define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */ | ||
52 | |||
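Note that some bit values are reused with direction-dependent meanings (0x08 is IPHDRSRC on encap but OUTFMT on decap; 0x20 is IVSRC on encap but VERIFY_CSUM on decap), so the options byte must be built for one direction at a time. A hedged decap example; which combinations a given SEC version accepts is an assumption:

	/* Tunnel-mode ESP decap: 64-entry anti-replay window, validate the
	 * inner IP header checksum, output only the decapsulated frame. */
	u8 decap_options = PDBOPTS_ESP_TUNNEL | PDBOPTS_ESP_OUTFMT |
			   PDBOPTS_ESP_ARS64 | PDBOPTS_ESP_VERIFY_CSUM;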
53 | /* | ||
54 | * General IPSec encap/decap PDB definitions | ||
55 | */ | ||
56 | struct ipsec_encap_cbc { | ||
57 | u32 iv[4]; | ||
58 | }; | ||
59 | |||
60 | struct ipsec_encap_ctr { | ||
61 | u32 ctr_nonce; | ||
62 | u32 ctr_initial; | ||
63 | u32 iv[2]; | ||
64 | }; | ||
65 | |||
66 | struct ipsec_encap_ccm { | ||
67 | u32 salt; /* lower 24 bits */ | ||
68 | u8 b0_flags; | ||
69 | u8 ctr_flags; | ||
70 | u16 ctr_initial; | ||
71 | u32 iv[2]; | ||
72 | }; | ||
73 | |||
74 | struct ipsec_encap_gcm { | ||
75 | u32 salt; /* lower 24 bits */ | ||
76 | u32 rsvd1; | ||
77 | u32 iv[2]; | ||
78 | }; | ||
79 | |||
80 | struct ipsec_encap_pdb { | ||
81 | u8 hmo_rsvd; | ||
82 | u8 ip_nh; | ||
83 | u8 ip_nh_offset; | ||
84 | u8 options; | ||
85 | u32 seq_num_ext_hi; | ||
86 | u32 seq_num; | ||
87 | union { | ||
88 | struct ipsec_encap_cbc cbc; | ||
89 | struct ipsec_encap_ctr ctr; | ||
90 | struct ipsec_encap_ccm ccm; | ||
91 | struct ipsec_encap_gcm gcm; | ||
92 | }; | ||
93 | u32 spi; | ||
94 | u16 rsvd1; | ||
95 | u16 ip_hdr_len; | ||
96 | u32 ip_hdr[0]; /* optional IP Header content */ | ||
97 | }; | ||
98 | |||
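For illustration, a minimal sketch of filling this PDB for CBC tunnel-mode encapsulation with a hardware-sourced IV; field placement follows the C layout above, and whether the PROTOCOL command expects additional byte-order fixups is left as an assumption:

	static void example_init_encap_pdb(struct ipsec_encap_pdb *pdb, u32 spi)
	{
		memset(pdb, 0, sizeof(*pdb));
		pdb->options = PDBOPTS_ESP_TUNNEL | PDBOPTS_ESP_IVSRC;
		pdb->spi = spi;
		pdb->seq_num = 1;	/* first outbound packet */
	}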
99 | struct ipsec_decap_cbc { | ||
100 | u32 rsvd[2]; | ||
101 | }; | ||
102 | |||
103 | struct ipsec_decap_ctr { | ||
104 | u32 salt; | ||
105 | u32 ctr_initial; | ||
106 | }; | ||
107 | |||
108 | struct ipsec_decap_ccm { | ||
109 | u32 salt; | ||
110 | u8 iv_flags; | ||
111 | u8 ctr_flags; | ||
112 | u16 ctr_initial; | ||
113 | }; | ||
114 | |||
115 | struct ipsec_decap_gcm { | ||
116 | u32 salt; | ||
117 | u32 resvd; | ||
118 | }; | ||
119 | |||
120 | struct ipsec_decap_pdb { | ||
121 | u16 hmo_ip_hdr_len; | ||
122 | u8 ip_nh_offset; | ||
123 | u8 options; | ||
124 | union { | ||
125 | struct ipsec_decap_cbc cbc; | ||
126 | struct ipsec_decap_ctr ctr; | ||
127 | struct ipsec_decap_ccm ccm; | ||
128 | struct ipsec_decap_gcm gcm; | ||
129 | }; | ||
130 | u32 seq_num_ext_hi; | ||
131 | u32 seq_num; | ||
132 | u32 anti_replay[2]; | ||
133 | u32 end_index[0]; | ||
134 | }; | ||
135 | |||
136 | /* | ||
137 | * IPSec ESP Datapath Protocol Override Register (DPOVRD) | ||
138 | */ | ||
139 | struct ipsec_deco_dpovrd { | ||
140 | #define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80 | ||
141 | u8 ovrd_ecn; | ||
142 | u8 ip_hdr_len; | ||
143 | u8 nh_offset; | ||
144 | u8 next_header; /* reserved if decap */ | ||
145 | }; | ||
146 | |||
147 | /* | ||
148 | * IEEE 802.11i WiFi Protocol Data Block | ||
149 | */ | ||
150 | #define WIFI_PDBOPTS_FCS 0x01 | ||
151 | #define WIFI_PDBOPTS_AR 0x40 | ||
152 | |||
153 | struct wifi_encap_pdb { | ||
154 | u16 mac_hdr_len; | ||
155 | u8 rsvd; | ||
156 | u8 options; | ||
157 | u8 iv_flags; | ||
158 | u8 pri; | ||
159 | u16 pn1; | ||
160 | u32 pn2; | ||
161 | u16 frm_ctrl_mask; | ||
162 | u16 seq_ctrl_mask; | ||
163 | u8 rsvd1[2]; | ||
164 | u8 cnst; | ||
165 | u8 key_id; | ||
166 | u8 ctr_flags; | ||
167 | u8 rsvd2; | ||
168 | u16 ctr_init; | ||
169 | }; | ||
170 | |||
171 | struct wifi_decap_pdb { | ||
172 | u16 mac_hdr_len; | ||
173 | u8 rsvd; | ||
174 | u8 options; | ||
175 | u8 iv_flags; | ||
176 | u8 pri; | ||
177 | u16 pn1; | ||
178 | u32 pn2; | ||
179 | u16 frm_ctrl_mask; | ||
180 | u16 seq_ctrl_mask; | ||
181 | u8 rsvd1[4]; | ||
182 | u8 ctr_flags; | ||
183 | u8 rsvd2; | ||
184 | u16 ctr_init; | ||
185 | }; | ||
186 | |||
187 | /* | ||
188 | * IEEE 802.16 WiMAX Protocol Data Block | ||
189 | */ | ||
190 | #define WIMAX_PDBOPTS_FCS 0x01 | ||
191 | #define WIMAX_PDBOPTS_AR 0x40 /* decap only */ | ||
192 | |||
193 | struct wimax_encap_pdb { | ||
194 | u8 rsvd[3]; | ||
195 | u8 options; | ||
196 | u32 nonce; | ||
197 | u8 b0_flags; | ||
198 | u8 ctr_flags; | ||
199 | u16 ctr_init; | ||
200 | /* begin DECO writeback region */ | ||
201 | u32 pn; | ||
202 | /* end DECO writeback region */ | ||
203 | }; | ||
204 | |||
205 | struct wimax_decap_pdb { | ||
206 | u8 rsvd[3]; | ||
207 | u8 options; | ||
208 | u32 nonce; | ||
209 | u8 iv_flags; | ||
210 | u8 ctr_flags; | ||
211 | u16 ctr_init; | ||
212 | /* begin DECO writeback region */ | ||
213 | u32 pn; | ||
214 | u8 rsvd1[2]; | ||
215 | u16 antireplay_len; | ||
216 | u64 antireplay_scorecard; | ||
217 | /* end DECO writeback region */ | ||
218 | }; | ||
219 | |||
220 | /* | ||
221 | * IEEE 802.1AE MACsec Protocol Data Block | ||
222 | */ | ||
223 | #define MACSEC_PDBOPTS_FCS 0x01 | ||
224 | #define MACSEC_PDBOPTS_AR 0x40 /* used in decap only */ | ||
225 | |||
226 | struct macsec_encap_pdb { | ||
227 | u16 aad_len; | ||
228 | u8 rsvd; | ||
229 | u8 options; | ||
230 | u64 sci; | ||
231 | u16 ethertype; | ||
232 | u8 tci_an; | ||
233 | u8 rsvd1; | ||
234 | /* begin DECO writeback region */ | ||
235 | u32 pn; | ||
236 | /* end DECO writeback region */ | ||
237 | }; | ||
238 | |||
239 | struct macsec_decap_pdb { | ||
240 | u16 aad_len; | ||
241 | u8 rsvd; | ||
242 | u8 options; | ||
243 | u64 sci; | ||
244 | u8 rsvd1[3]; | ||
245 | /* begin DECO writeback region */ | ||
246 | u8 antireplay_len; | ||
247 | u32 pn; | ||
248 | u64 antireplay_scorecard; | ||
249 | /* end DECO writeback region */ | ||
250 | }; | ||
251 | |||
252 | /* | ||
253 | * SSL/TLS/DTLS Protocol Data Blocks | ||
254 | */ | ||
255 | |||
256 | #define TLS_PDBOPTS_ARS32 0x40 | ||
257 | #define TLS_PDBOPTS_ARS64 0xc0 | ||
258 | #define TLS_PDBOPTS_OUTFMT 0x08 | ||
259 | #define TLS_PDBOPTS_IV_WRTBK 0x02 /* 1.1/1.2/DTLS only */ | ||
260 | #define TLS_PDBOPTS_EXP_RND_IV 0x01 /* 1.1/1.2/DTLS only */ | ||
261 | |||
262 | struct tls_block_encap_pdb { | ||
263 | u8 type; | ||
264 | u8 version[2]; | ||
265 | u8 options; | ||
266 | u64 seq_num; | ||
267 | u32 iv[4]; | ||
268 | }; | ||
269 | |||
270 | struct tls_stream_encap_pdb { | ||
271 | u8 type; | ||
272 | u8 version[2]; | ||
273 | u8 options; | ||
274 | u64 seq_num; | ||
275 | u8 i; | ||
276 | u8 j; | ||
277 | u8 rsvd1[2]; | ||
278 | }; | ||
279 | |||
280 | struct dtls_block_encap_pdb { | ||
281 | u8 type; | ||
282 | u8 version[2]; | ||
283 | u8 options; | ||
284 | u16 epoch; | ||
285 | u16 seq_num[3]; | ||
286 | u32 iv[4]; | ||
287 | }; | ||
288 | |||
289 | struct tls_block_decap_pdb { | ||
290 | u8 rsvd[3]; | ||
291 | u8 options; | ||
292 | u64 seq_num; | ||
293 | u32 iv[4]; | ||
294 | }; | ||
295 | |||
296 | struct tls_stream_decap_pdb { | ||
297 | u8 rsvd[3]; | ||
298 | u8 options; | ||
299 | u64 seq_num; | ||
300 | u8 i; | ||
301 | u8 j; | ||
302 | u8 rsvd1[2]; | ||
303 | }; | ||
304 | |||
305 | struct dtls_block_decap_pdb { | ||
306 | u8 rsvd[3]; | ||
307 | u8 options; | ||
308 | u16 epoch; | ||
309 | u16 seq_num[3]; | ||
310 | u32 iv[4]; | ||
311 | u64 antireplay_scorecard; | ||
312 | }; | ||
313 | |||
314 | /* | ||
315 | * SRTP Protocol Data Blocks | ||
316 | */ | ||
317 | #define SRTP_PDBOPTS_MKI 0x08 | ||
318 | #define SRTP_PDBOPTS_AR 0x40 | ||
319 | |||
320 | struct srtp_encap_pdb { | ||
321 | u8 x_len; | ||
322 | u8 mki_len; | ||
323 | u8 n_tag; | ||
324 | u8 options; | ||
325 | u32 cnst0; | ||
326 | u8 rsvd[2]; | ||
327 | u16 cnst1; | ||
328 | u16 salt[7]; | ||
329 | u16 cnst2; | ||
330 | u32 rsvd1; | ||
331 | u32 roc; | ||
332 | u32 opt_mki; | ||
333 | }; | ||
334 | |||
335 | struct srtp_decap_pdb { | ||
336 | u8 x_len; | ||
337 | u8 mki_len; | ||
338 | u8 n_tag; | ||
339 | u8 options; | ||
340 | u32 cnst0; | ||
341 | u8 rsvd[2]; | ||
342 | u16 cnst1; | ||
343 | u16 salt[7]; | ||
344 | u16 cnst2; | ||
345 | u16 rsvd1; | ||
346 | u16 seq_num; | ||
347 | u32 roc; | ||
348 | u64 antireplay_scorecard; | ||
349 | }; | ||
350 | |||
351 | /* | ||
352 | * DSA/ECDSA Protocol Data Blocks | ||
353 | * Two of these exist: DSA-SIGN and DSA-VERIFY. They are similar | ||
354 | * except for the treatment of "w" for verify, "s" for sign, | ||
355 | * and the placement of "a,b". | ||
356 | */ | ||
357 | #define DSA_PDB_SGF_SHIFT 24 | ||
358 | #define DSA_PDB_SGF_MASK (0xff << DSA_PDB_SGF_SHIFT) | ||
359 | #define DSA_PDB_SGF_Q (0x80 << DSA_PDB_SGF_SHIFT) | ||
360 | #define DSA_PDB_SGF_R (0x40 << DSA_PDB_SGF_SHIFT) | ||
361 | #define DSA_PDB_SGF_G (0x20 << DSA_PDB_SGF_SHIFT) | ||
362 | #define DSA_PDB_SGF_W (0x10 << DSA_PDB_SGF_SHIFT) | ||
363 | #define DSA_PDB_SGF_S (0x10 << DSA_PDB_SGF_SHIFT) | ||
364 | #define DSA_PDB_SGF_F (0x08 << DSA_PDB_SGF_SHIFT) | ||
365 | #define DSA_PDB_SGF_C (0x04 << DSA_PDB_SGF_SHIFT) | ||
366 | #define DSA_PDB_SGF_D (0x02 << DSA_PDB_SGF_SHIFT) | ||
367 | #define DSA_PDB_SGF_AB_SIGN (0x02 << DSA_PDB_SGF_SHIFT) | ||
368 | #define DSA_PDB_SGF_AB_VERIFY (0x01 << DSA_PDB_SGF_SHIFT) | ||
369 | |||
370 | #define DSA_PDB_L_SHIFT 7 | ||
371 | #define DSA_PDB_L_MASK (0x3ff << DSA_PDB_L_SHIFT) | ||
372 | |||
373 | #define DSA_PDB_N_MASK 0x7f | ||
374 | |||
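A small sketch of packing the combined SGF/L/N word that these masks describe, with all operands in general memory (all SGF bits clear); the field semantics are inferred from the shift/mask definitions above:

	unsigned int l = 128, n = 20;	/* e.g. 1024-bit p, 160-bit q, in bytes (assumed units) */
	u32 sgf_ln = ((l << DSA_PDB_L_SHIFT) & DSA_PDB_L_MASK) |
		     (n & DSA_PDB_N_MASK);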
375 | struct dsa_sign_pdb { | ||
376 | u32 sgf_ln; /* Use DSA_PDB_ definitions per above */ | ||
377 | u8 *q; | ||
378 | u8 *r; | ||
379 | u8 *g; /* or Gx,y */ | ||
380 | u8 *s; | ||
381 | u8 *f; | ||
382 | u8 *c; | ||
383 | u8 *d; | ||
384 | u8 *ab; /* ECC only */ | ||
385 | u8 *u; | ||
386 | }; | ||
387 | |||
388 | struct dsa_verify_pdb { | ||
389 | u32 sgf_ln; | ||
390 | u8 *q; | ||
391 | u8 *r; | ||
392 | u8 *g; /* or Gx,y */ | ||
393 | u8 *w; /* or Wx,y */ | ||
394 | u8 *f; | ||
395 | u8 *c; | ||
396 | u8 *d; | ||
397 | u8 *tmp; /* temporary data block */ | ||
398 | u8 *ab; /* only used if ECC processing */ | ||
399 | }; | ||
400 | |||
401 | #endif | ||
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index e9f7a70cdd5e..3223fc6d647c 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -117,6 +117,12 @@ struct jr_outentry { | |||
117 | #define CHA_NUM_DECONUM_SHIFT 56 | 117 | #define CHA_NUM_DECONUM_SHIFT 56 |
118 | #define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) | 118 | #define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) |
119 | 119 | ||
120 | struct sec_vid { | ||
121 | u16 ip_id; | ||
122 | u8 maj_rev; | ||
123 | u8 min_rev; | ||
124 | }; | ||
125 | |||
120 | struct caam_perfmon { | 126 | struct caam_perfmon { |
121 | /* Performance Monitor Registers f00-f9f */ | 127 | /* Performance Monitor Registers f00-f9f */ |
122 | u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */ | 128 | u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */ |
@@ -167,7 +173,7 @@ struct partid { | |||
167 | u32 pidr; /* partition ID, DECO */ | 173 | u32 pidr; /* partition ID, DECO */ |
168 | }; | 174 | }; |
169 | 175 | ||
170 | /* RNG test mode (replicated twice in some configurations) */ | 176 | /* RNGB test mode (replicated twice in some configurations) */ |
171 | /* Padded out to 0x100 */ | 177 | /* Padded out to 0x100 */ |
172 | struct rngtst { | 178 | struct rngtst { |
173 | u32 mode; /* RTSTMODEx - Test mode */ | 179 | u32 mode; /* RTSTMODEx - Test mode */ |
@@ -200,6 +206,31 @@ struct rngtst { | |||
200 | u32 rsvd14[15]; | 206 | u32 rsvd14[15]; |
201 | }; | 207 | }; |
202 | 208 | ||
209 | /* RNG4 TRNG test registers */ | ||
210 | struct rng4tst { | ||
211 | #define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ | ||
212 | u32 rtmctl; /* misc. control register */ | ||
213 | u32 rtscmisc; /* statistical check misc. register */ | ||
214 | u32 rtpkrrng; /* poker range register */ | ||
215 | union { | ||
216 | u32 rtpkrmax; /* PRGM=1: poker max. limit register */ | ||
217 | u32 rtpkrsq; /* PRGM=0: poker square calc. result register */ | ||
218 | }; | ||
219 | #define RTSDCTL_ENT_DLY_SHIFT 16 | ||
220 | #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) | ||
221 | u32 rtsdctl; /* seed control register */ | ||
222 | union { | ||
223 | u32 rtsblim; /* PRGM=1: sparse bit limit register */ | ||
224 | u32 rttotsam; /* PRGM=0: total samples register */ | ||
225 | }; | ||
226 | u32 rtfrqmin; /* frequency count min. limit register */ | ||
227 | union { | ||
228 | u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */ | ||
229 | u32 rtfrqcnt; /* PRGM=0: freq. count register */ | ||
230 | }; | ||
231 | u32 rsvd1[56]; | ||
232 | }; | ||
233 | |||
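The PRGM bit selects which member of each union is live: limit registers in program mode, read-back results in run mode. A hedged sketch of that dance, assuming rd_reg32()/wr_reg32() are the driver's usual register accessors:

	u32 ent_delay = 3200;	/* hypothetical entropy sample delay */
	u32 val = rd_reg32(&r4tst->rtmctl);

	wr_reg32(&r4tst->rtmctl, val | RTMCTL_PRGM);	/* enter program mode */
	wr_reg32(&r4tst->rtsdctl,
		 ent_delay << RTSDCTL_ENT_DLY_SHIFT);	/* configure limits */
	wr_reg32(&r4tst->rtmctl, val & ~RTMCTL_PRGM);	/* back to run mode */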
203 | /* | 234 | /* |
204 | * caam_ctrl - basic core configuration | 235 | * caam_ctrl - basic core configuration |
205 | * starts base + 0x0000 padded out to 0x1000 | 236 | * starts base + 0x0000 padded out to 0x1000 |
@@ -249,7 +280,10 @@ struct caam_ctrl { | |||
249 | 280 | ||
250 | /* RNG Test/Verification/Debug Access 600-7ff */ | 281 | /* RNG Test/Verification/Debug Access 600-7ff */ |
251 | /* (Useful in Test/Debug modes only...) */ | 282 | /* (Useful in Test/Debug modes only...) */ |
252 | struct rngtst rtst[2]; | 283 | union { |
284 | struct rngtst rtst[2]; | ||
285 | struct rng4tst r4tst[2]; | ||
286 | }; | ||
253 | 287 | ||
254 | u32 rsvd9[448]; | 288 | u32 rsvd9[448]; |
255 | 289 | ||
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h new file mode 100644 index 000000000000..e0037c8ee243 --- /dev/null +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * CAAM/SEC 4.x functions for using scatterlists in caam driver | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | struct sec4_sg_entry; | ||
9 | |||
10 | /* | ||
11 | * convert single dma address to h/w link table format | ||
12 | */ | ||
13 | static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, | ||
14 | dma_addr_t dma, u32 len, u32 offset) | ||
15 | { | ||
16 | sec4_sg_ptr->ptr = dma; | ||
17 | sec4_sg_ptr->len = len; | ||
18 | sec4_sg_ptr->reserved = 0; | ||
19 | sec4_sg_ptr->buf_pool_id = 0; | ||
20 | sec4_sg_ptr->offset = offset; | ||
21 | #ifdef DEBUG | ||
22 | print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", | ||
23 | DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, | ||
24 | sizeof(struct sec4_sg_entry), 1); | ||
25 | #endif | ||
26 | } | ||
27 | |||
28 | /* | ||
29 | * convert scatterlist to h/w link table format | ||
30 | * but does not set the final bit; instead, returns the last entry | ||
31 | */ | ||
32 | static inline struct sec4_sg_entry * | ||
33 | sg_to_sec4_sg(struct scatterlist *sg, int sg_count, | ||
34 | struct sec4_sg_entry *sec4_sg_ptr, u32 offset) | ||
35 | { | ||
36 | while (sg_count) { | ||
37 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), | ||
38 | sg_dma_len(sg), offset); | ||
39 | sec4_sg_ptr++; | ||
40 | sg = scatterwalk_sg_next(sg); | ||
41 | sg_count--; | ||
42 | } | ||
43 | return sec4_sg_ptr - 1; | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * convert scatterlist to h/w link table format | ||
48 | * scatterlist must have been previously dma mapped | ||
49 | */ | ||
50 | static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, | ||
51 | struct sec4_sg_entry *sec4_sg_ptr, | ||
52 | u32 offset) | ||
53 | { | ||
54 | sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); | ||
55 | sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; | ||
56 | } | ||
57 | |||
58 | /* count number of elements in scatterlist */ | ||
59 | static inline int __sg_count(struct scatterlist *sg_list, int nbytes, | ||
60 | bool *chained) | ||
61 | { | ||
62 | struct scatterlist *sg = sg_list; | ||
63 | int sg_nents = 0; | ||
64 | |||
65 | while (nbytes > 0) { | ||
66 | sg_nents++; | ||
67 | nbytes -= sg->length; | ||
68 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | ||
69 | *chained = true; | ||
70 | sg = scatterwalk_sg_next(sg); | ||
71 | } | ||
72 | |||
73 | return sg_nents; | ||
74 | } | ||
75 | |||
76 | /* derive number of elements in scatterlist, but return 0 for a single entry */ | ||
77 | static inline int sg_count(struct scatterlist *sg_list, int nbytes, | ||
78 | bool *chained) | ||
79 | { | ||
80 | int sg_nents = __sg_count(sg_list, nbytes, chained); | ||
81 | |||
82 | if (likely(sg_nents == 1)) | ||
83 | return 0; | ||
84 | |||
85 | return sg_nents; | ||
86 | } | ||
87 | |||
88 | static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg, | ||
89 | unsigned int nents, enum dma_data_direction dir, | ||
90 | bool chained) | ||
91 | { | ||
92 | if (unlikely(chained)) { | ||
93 | int i; | ||
94 | for (i = 0; i < nents; i++) { | ||
95 | dma_map_sg(dev, sg, 1, dir); | ||
96 | sg = scatterwalk_sg_next(sg); | ||
97 | } | ||
98 | } else { | ||
99 | dma_map_sg(dev, sg, nents, dir); | ||
100 | } | ||
101 | return nents; | ||
102 | } | ||
103 | |||
104 | static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, | ||
105 | unsigned int nents, enum dma_data_direction dir, | ||
106 | bool chained) | ||
107 | { | ||
108 | if (unlikely(chained)) { | ||
109 | int i; | ||
110 | for (i = 0; i < nents; i++) { | ||
111 | dma_unmap_sg(dev, sg, 1, dir); | ||
112 | sg = scatterwalk_sg_next(sg); | ||
113 | } | ||
114 | } else { | ||
115 | dma_unmap_sg(dev, sg, nents, dir); | ||
116 | } | ||
117 | return nents; | ||
118 | } | ||
119 | |||
120 | /* Copy len bytes from sg to dest, starting from the beginning */ | ||
121 | static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) | ||
122 | { | ||
123 | struct scatterlist *current_sg = sg; | ||
124 | int cpy_index = 0, next_cpy_index = current_sg->length; | ||
125 | |||
126 | while (next_cpy_index < len) { | ||
127 | memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), | ||
128 | current_sg->length); | ||
129 | current_sg = scatterwalk_sg_next(current_sg); | ||
130 | cpy_index = next_cpy_index; | ||
131 | next_cpy_index += current_sg->length; | ||
132 | } | ||
133 | if (cpy_index < len) | ||
134 | memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), | ||
135 | len - cpy_index); | ||
136 | } | ||
137 | |||
138 | /* Copy sg data, from to_skip to end, to dest */ | ||
139 | static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, | ||
140 | int to_skip, unsigned int end) | ||
141 | { | ||
142 | struct scatterlist *current_sg = sg; | ||
143 | int sg_index, cpy_index; | ||
144 | |||
145 | sg_index = current_sg->length; | ||
146 | while (sg_index <= to_skip) { | ||
147 | current_sg = scatterwalk_sg_next(current_sg); | ||
148 | sg_index += current_sg->length; | ||
149 | } | ||
150 | cpy_index = sg_index - to_skip; | ||
151 | memcpy(dest, (u8 *) sg_virt(current_sg) + | ||
152 | current_sg->length - cpy_index, cpy_index); | ||
153 | current_sg = scatterwalk_sg_next(current_sg); | ||
154 | if (end - sg_index) | ||
155 | sg_copy(dest + cpy_index, current_sg, end - sg_index); | ||
156 | } | ||
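Taken together, these helpers form a pipeline: count the segments (with 0 standing for a single, directly addressable buffer), DMA-map with chaining honored, then emit the hardware link table with the final bit set. A hedged sketch; req_src, nbytes, dev and sec4_sg are stand-ins for the caller's state:

	bool chained = false;
	int nents = sg_count(req_src, nbytes, &chained);

	if (nents) {	/* 0 means: use the single DMA address directly */
		dma_map_sg_chained(dev, req_src, nents, DMA_TO_DEVICE, chained);
		sg_to_sec4_sg_last(req_src, nents, sec4_sg, 0);
	}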
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 1cc6b3f3e262..21c1a87032b7 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #define MV_CESA "MV-CESA:" | 25 | #define MV_CESA "MV-CESA:" |
26 | #define MAX_HW_HASH_SIZE 0xFFFF | 26 | #define MAX_HW_HASH_SIZE 0xFFFF |
27 | #define MV_CESA_EXPIRE 500 /* msec */ | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * STM: | 30 | * STM: |
@@ -87,6 +88,7 @@ struct crypto_priv { | |||
87 | spinlock_t lock; | 88 | spinlock_t lock; |
88 | struct crypto_queue queue; | 89 | struct crypto_queue queue; |
89 | enum engine_status eng_st; | 90 | enum engine_status eng_st; |
91 | struct timer_list completion_timer; | ||
90 | struct crypto_async_request *cur_req; | 92 | struct crypto_async_request *cur_req; |
91 | struct req_progress p; | 93 | struct req_progress p; |
92 | int max_req_size; | 94 | int max_req_size; |
@@ -138,6 +140,29 @@ struct mv_req_hash_ctx { | |||
138 | int count_add; | 140 | int count_add; |
139 | }; | 141 | }; |
140 | 142 | ||
143 | static void mv_completion_timer_callback(unsigned long unused) | ||
144 | { | ||
145 | int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0; | ||
146 | |||
147 | printk(KERN_ERR MV_CESA | ||
148 | "completion timer expired (CESA %sactive), cleaning up.\n", | ||
149 | active ? "" : "in"); | ||
150 | |||
151 | del_timer(&cpg->completion_timer); | ||
152 | writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD); | ||
153 | while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC) | ||
154 | printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n", __func__); | ||
155 | cpg->eng_st = ENGINE_W_DEQUEUE; | ||
156 | wake_up_process(cpg->queue_th); | ||
157 | } | ||
158 | |||
159 | static void mv_setup_timer(void) | ||
160 | { | ||
161 | setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0); | ||
162 | mod_timer(&cpg->completion_timer, | ||
163 | jiffies + msecs_to_jiffies(MV_CESA_EXPIRE)); | ||
164 | } | ||
165 | |||
141 | static void compute_aes_dec_key(struct mv_ctx *ctx) | 166 | static void compute_aes_dec_key(struct mv_ctx *ctx) |
142 | { | 167 | { |
143 | struct crypto_aes_ctx gen_aes_key; | 168 | struct crypto_aes_ctx gen_aes_key; |
@@ -273,12 +298,8 @@ static void mv_process_current_q(int first_block) | |||
273 | sizeof(struct sec_accel_config)); | 298 | sizeof(struct sec_accel_config)); |
274 | 299 | ||
275 | /* GO */ | 300 | /* GO */ |
301 | mv_setup_timer(); | ||
276 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 302 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); |
277 | |||
278 | /* | ||
279 | * XXX: add timer if the interrupt does not occur for some mystery | ||
280 | * reason | ||
281 | */ | ||
282 | } | 303 | } |
283 | 304 | ||
284 | static void mv_crypto_algo_completion(void) | 305 | static void mv_crypto_algo_completion(void) |
@@ -357,12 +378,8 @@ static void mv_process_hash_current(int first_block) | |||
357 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | 378 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); |
358 | 379 | ||
359 | /* GO */ | 380 | /* GO */ |
381 | mv_setup_timer(); | ||
360 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 382 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); |
361 | |||
362 | /* | ||
363 | * XXX: add timer if the interrupt does not occur for some mystery | ||
364 | * reason | ||
365 | */ | ||
366 | } | 383 | } |
367 | 384 | ||
368 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, | 385 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, |
@@ -406,6 +423,15 @@ out: | |||
406 | return rc; | 423 | return rc; |
407 | } | 424 | } |
408 | 425 | ||
426 | static void mv_save_digest_state(struct mv_req_hash_ctx *ctx) | ||
427 | { | ||
428 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | ||
429 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | ||
430 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | ||
431 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | ||
432 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | ||
433 | } | ||
434 | |||
409 | static void mv_hash_algo_completion(void) | 435 | static void mv_hash_algo_completion(void) |
410 | { | 436 | { |
411 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | 437 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); |
@@ -420,14 +446,12 @@ static void mv_hash_algo_completion(void) | |||
420 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, | 446 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, |
421 | crypto_ahash_digestsize(crypto_ahash_reqtfm | 447 | crypto_ahash_digestsize(crypto_ahash_reqtfm |
422 | (req))); | 448 | (req))); |
423 | } else | 449 | } else { |
450 | mv_save_digest_state(ctx); | ||
424 | mv_hash_final_fallback(req); | 451 | mv_hash_final_fallback(req); |
452 | } | ||
425 | } else { | 453 | } else { |
426 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | 454 | mv_save_digest_state(ctx); |
427 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | ||
428 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | ||
429 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | ||
430 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | ||
431 | } | 455 | } |
432 | } | 456 | } |
433 | 457 | ||
@@ -888,6 +912,10 @@ irqreturn_t crypto_int(int irq, void *priv) | |||
888 | if (!(val & SEC_INT_ACCEL0_DONE)) | 912 | if (!(val & SEC_INT_ACCEL0_DONE)) |
889 | return IRQ_NONE; | 913 | return IRQ_NONE; |
890 | 914 | ||
915 | if (!del_timer(&cpg->completion_timer)) { | ||
916 | printk(KERN_WARNING MV_CESA | ||
917 | "got an interrupt but no pending timer?\n"); | ||
918 | } | ||
891 | val &= ~SEC_INT_ACCEL0_DONE; | 919 | val &= ~SEC_INT_ACCEL0_DONE; |
892 | writel(val, cpg->reg + FPGA_INT_STATUS); | 920 | writel(val, cpg->reg + FPGA_INT_STATUS); |
893 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); | 921 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); |
@@ -1061,6 +1089,7 @@ static int mv_probe(struct platform_device *pdev) | |||
1061 | if (!IS_ERR(cp->clk)) | 1089 | if (!IS_ERR(cp->clk)) |
1062 | clk_prepare_enable(cp->clk); | 1090 | clk_prepare_enable(cp->clk); |
1063 | 1091 | ||
1092 | writel(0, cpg->reg + SEC_ACCEL_INT_STATUS); | ||
1064 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | 1093 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); |
1065 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | 1094 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); |
1066 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | 1095 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); |
@@ -1098,6 +1127,10 @@ err_unreg_ecb: | |||
1098 | crypto_unregister_alg(&mv_aes_alg_ecb); | 1127 | crypto_unregister_alg(&mv_aes_alg_ecb); |
1099 | err_irq: | 1128 | err_irq: |
1100 | free_irq(irq, cp); | 1129 | free_irq(irq, cp); |
1130 | if (!IS_ERR(cp->clk)) { | ||
1131 | clk_disable_unprepare(cp->clk); | ||
1132 | clk_put(cp->clk); | ||
1133 | } | ||
1101 | err_thread: | 1134 | err_thread: |
1102 | kthread_stop(cp->queue_th); | 1135 | kthread_stop(cp->queue_th); |
1103 | err_unmap_sram: | 1136 | err_unmap_sram: |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 921039e56f87..efff788d2f1d 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -53,117 +53,6 @@ | |||
53 | 53 | ||
54 | #include "talitos.h" | 54 | #include "talitos.h" |
55 | 55 | ||
56 | #define TALITOS_TIMEOUT 100000 | ||
57 | #define TALITOS_MAX_DATA_LEN 65535 | ||
58 | |||
59 | #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) | ||
60 | #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) | ||
61 | #define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf) | ||
62 | |||
63 | /* descriptor pointer entry */ | ||
64 | struct talitos_ptr { | ||
65 | __be16 len; /* length */ | ||
66 | u8 j_extent; /* jump to sg link table and/or extent */ | ||
67 | u8 eptr; /* extended address */ | ||
68 | __be32 ptr; /* address */ | ||
69 | }; | ||
70 | |||
71 | static const struct talitos_ptr zero_entry = { | ||
72 | .len = 0, | ||
73 | .j_extent = 0, | ||
74 | .eptr = 0, | ||
75 | .ptr = 0 | ||
76 | }; | ||
77 | |||
78 | /* descriptor */ | ||
79 | struct talitos_desc { | ||
80 | __be32 hdr; /* header high bits */ | ||
81 | __be32 hdr_lo; /* header low bits */ | ||
82 | struct talitos_ptr ptr[7]; /* ptr/len pair array */ | ||
83 | }; | ||
84 | |||
85 | /** | ||
86 | * talitos_request - descriptor submission request | ||
87 | * @desc: descriptor pointer (kernel virtual) | ||
88 | * @dma_desc: descriptor's physical bus address | ||
89 | * @callback: whom to call when descriptor processing is done | ||
90 | * @context: caller context (optional) | ||
91 | */ | ||
92 | struct talitos_request { | ||
93 | struct talitos_desc *desc; | ||
94 | dma_addr_t dma_desc; | ||
95 | void (*callback) (struct device *dev, struct talitos_desc *desc, | ||
96 | void *context, int error); | ||
97 | void *context; | ||
98 | }; | ||
99 | |||
100 | /* per-channel fifo management */ | ||
101 | struct talitos_channel { | ||
102 | void __iomem *reg; | ||
103 | |||
104 | /* request fifo */ | ||
105 | struct talitos_request *fifo; | ||
106 | |||
107 | /* number of requests pending in channel h/w fifo */ | ||
108 | atomic_t submit_count ____cacheline_aligned; | ||
109 | |||
110 | /* request submission (head) lock */ | ||
111 | spinlock_t head_lock ____cacheline_aligned; | ||
112 | /* index to next free descriptor request */ | ||
113 | int head; | ||
114 | |||
115 | /* request release (tail) lock */ | ||
116 | spinlock_t tail_lock ____cacheline_aligned; | ||
117 | /* index to next in-progress/done descriptor request */ | ||
118 | int tail; | ||
119 | }; | ||
120 | |||
121 | struct talitos_private { | ||
122 | struct device *dev; | ||
123 | struct platform_device *ofdev; | ||
124 | void __iomem *reg; | ||
125 | int irq[2]; | ||
126 | |||
127 | /* SEC global registers lock */ | ||
128 | spinlock_t reg_lock ____cacheline_aligned; | ||
129 | |||
130 | /* SEC version geometry (from device tree node) */ | ||
131 | unsigned int num_channels; | ||
132 | unsigned int chfifo_len; | ||
133 | unsigned int exec_units; | ||
134 | unsigned int desc_types; | ||
135 | |||
136 | /* SEC Compatibility info */ | ||
137 | unsigned long features; | ||
138 | |||
139 | /* | ||
140 | * length of the request fifo | ||
141 | * fifo_len is chfifo_len rounded up to next power of 2 | ||
142 | * so we can use bitwise ops to wrap | ||
143 | */ | ||
144 | unsigned int fifo_len; | ||
145 | |||
146 | struct talitos_channel *chan; | ||
147 | |||
148 | /* next channel to be assigned next incoming descriptor */ | ||
149 | atomic_t last_chan ____cacheline_aligned; | ||
150 | |||
151 | /* request callback tasklet */ | ||
152 | struct tasklet_struct done_task[2]; | ||
153 | |||
154 | /* list of registered algorithms */ | ||
155 | struct list_head alg_list; | ||
156 | |||
157 | /* hwrng device */ | ||
158 | struct hwrng rng; | ||
159 | }; | ||
160 | |||
161 | /* .features flag */ | ||
162 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | ||
163 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | ||
164 | #define TALITOS_FTR_SHA224_HWINIT 0x00000004 | ||
165 | #define TALITOS_FTR_HMAC_OK 0x00000008 | ||
166 | |||
167 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) | 56 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) |
168 | { | 57 | { |
169 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); | 58 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); |
@@ -303,11 +192,11 @@ static int init_device(struct device *dev) | |||
303 | * callback must check err and feedback in descriptor header | 192 | * callback must check err and feedback in descriptor header |
304 | * for device processing status. | 193 | * for device processing status. |
305 | */ | 194 | */ |
306 | static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | 195 | int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, |
307 | void (*callback)(struct device *dev, | 196 | void (*callback)(struct device *dev, |
308 | struct talitos_desc *desc, | 197 | struct talitos_desc *desc, |
309 | void *context, int error), | 198 | void *context, int error), |
310 | void *context) | 199 | void *context) |
311 | { | 200 | { |
312 | struct talitos_private *priv = dev_get_drvdata(dev); | 201 | struct talitos_private *priv = dev_get_drvdata(dev); |
313 | struct talitos_request *request; | 202 | struct talitos_request *request; |
@@ -348,6 +237,7 @@ static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
348 | 237 | ||
349 | return -EINPROGRESS; | 238 | return -EINPROGRESS; |
350 | } | 239 | } |
240 | EXPORT_SYMBOL(talitos_submit); | ||
351 | 241 | ||
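With the symbol exported and the declaration moved into talitos.h (below), a dependent module can enqueue raw descriptors directly. A sketch; channel selection and descriptor construction are the caller's responsibility:

	/* Hypothetical external user. talitos_submit() returns -EINPROGRESS
	 * on successful submission; -EAGAIN means the channel fifo is full. */
	err = talitos_submit(dev, ch, desc, my_done_callback, my_ctx);
	if (err != -EINPROGRESS)
		return err;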
352 | /* | 242 | /* |
353 | * process what was done, notify callback of error if not | 243 | * process what was done, notify callback of error if not |
@@ -733,7 +623,7 @@ static void talitos_unregister_rng(struct device *dev) | |||
733 | * crypto alg | 623 | * crypto alg |
734 | */ | 624 | */ |
735 | #define TALITOS_CRA_PRIORITY 3000 | 625 | #define TALITOS_CRA_PRIORITY 3000 |
736 | #define TALITOS_MAX_KEY_SIZE 64 | 626 | #define TALITOS_MAX_KEY_SIZE 96 |
737 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | 627 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ |
738 | 628 | ||
739 | #define MD5_BLOCK_SIZE 64 | 629 | #define MD5_BLOCK_SIZE 64 |
@@ -2066,6 +1956,59 @@ static struct talitos_alg_template driver_algs[] = { | |||
2066 | DESC_HDR_MODE1_MDEU_PAD | | 1956 | DESC_HDR_MODE1_MDEU_PAD | |
2067 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1957 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
2068 | }, | 1958 | }, |
1959 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
1960 | .alg.crypto = { | ||
1961 | .cra_name = "authenc(hmac(sha224),cbc(aes))", | ||
1962 | .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos", | ||
1963 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1964 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1965 | .cra_type = &crypto_aead_type, | ||
1966 | .cra_aead = { | ||
1967 | .setkey = aead_setkey, | ||
1968 | .setauthsize = aead_setauthsize, | ||
1969 | .encrypt = aead_encrypt, | ||
1970 | .decrypt = aead_decrypt, | ||
1971 | .givencrypt = aead_givencrypt, | ||
1972 | .geniv = "<built-in>", | ||
1973 | .ivsize = AES_BLOCK_SIZE, | ||
1974 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
1975 | } | ||
1976 | }, | ||
1977 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
1978 | DESC_HDR_SEL0_AESU | | ||
1979 | DESC_HDR_MODE0_AESU_CBC | | ||
1980 | DESC_HDR_SEL1_MDEUA | | ||
1981 | DESC_HDR_MODE1_MDEU_INIT | | ||
1982 | DESC_HDR_MODE1_MDEU_PAD | | ||
1983 | DESC_HDR_MODE1_MDEU_SHA224_HMAC, | ||
1984 | }, | ||
1985 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
1986 | .alg.crypto = { | ||
1987 | .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", | ||
1988 | .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos", | ||
1989 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1990 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1991 | .cra_type = &crypto_aead_type, | ||
1992 | .cra_aead = { | ||
1993 | .setkey = aead_setkey, | ||
1994 | .setauthsize = aead_setauthsize, | ||
1995 | .encrypt = aead_encrypt, | ||
1996 | .decrypt = aead_decrypt, | ||
1997 | .givencrypt = aead_givencrypt, | ||
1998 | .geniv = "<built-in>", | ||
1999 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
2000 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
2001 | } | ||
2002 | }, | ||
2003 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
2004 | DESC_HDR_SEL0_DEU | | ||
2005 | DESC_HDR_MODE0_DEU_CBC | | ||
2006 | DESC_HDR_MODE0_DEU_3DES | | ||
2007 | DESC_HDR_SEL1_MDEUA | | ||
2008 | DESC_HDR_MODE1_MDEU_INIT | | ||
2009 | DESC_HDR_MODE1_MDEU_PAD | | ||
2010 | DESC_HDR_MODE1_MDEU_SHA224_HMAC, | ||
2011 | }, | ||
2069 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2012 | { .type = CRYPTO_ALG_TYPE_AEAD, |
2070 | .alg.crypto = { | 2013 | .alg.crypto = { |
2071 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 2014 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
@@ -2121,6 +2064,112 @@ static struct talitos_alg_template driver_algs[] = { | |||
2121 | }, | 2064 | }, |
2122 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2065 | { .type = CRYPTO_ALG_TYPE_AEAD, |
2123 | .alg.crypto = { | 2066 | .alg.crypto = { |
2067 | .cra_name = "authenc(hmac(sha384),cbc(aes))", | ||
2068 | .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos", | ||
2069 | .cra_blocksize = AES_BLOCK_SIZE, | ||
2070 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
2071 | .cra_type = &crypto_aead_type, | ||
2072 | .cra_aead = { | ||
2073 | .setkey = aead_setkey, | ||
2074 | .setauthsize = aead_setauthsize, | ||
2075 | .encrypt = aead_encrypt, | ||
2076 | .decrypt = aead_decrypt, | ||
2077 | .givencrypt = aead_givencrypt, | ||
2078 | .geniv = "<built-in>", | ||
2079 | .ivsize = AES_BLOCK_SIZE, | ||
2080 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
2081 | } | ||
2082 | }, | ||
2083 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
2084 | DESC_HDR_SEL0_AESU | | ||
2085 | DESC_HDR_MODE0_AESU_CBC | | ||
2086 | DESC_HDR_SEL1_MDEUB | | ||
2087 | DESC_HDR_MODE1_MDEU_INIT | | ||
2088 | DESC_HDR_MODE1_MDEU_PAD | | ||
2089 | DESC_HDR_MODE1_MDEUB_SHA384_HMAC, | ||
2090 | }, | ||
2091 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
2092 | .alg.crypto = { | ||
2093 | .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", | ||
2094 | .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos", | ||
2095 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
2096 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
2097 | .cra_type = &crypto_aead_type, | ||
2098 | .cra_aead = { | ||
2099 | .setkey = aead_setkey, | ||
2100 | .setauthsize = aead_setauthsize, | ||
2101 | .encrypt = aead_encrypt, | ||
2102 | .decrypt = aead_decrypt, | ||
2103 | .givencrypt = aead_givencrypt, | ||
2104 | .geniv = "<built-in>", | ||
2105 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
2106 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
2107 | } | ||
2108 | }, | ||
2109 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
2110 | DESC_HDR_SEL0_DEU | | ||
2111 | DESC_HDR_MODE0_DEU_CBC | | ||
2112 | DESC_HDR_MODE0_DEU_3DES | | ||
2113 | DESC_HDR_SEL1_MDEUB | | ||
2114 | DESC_HDR_MODE1_MDEU_INIT | | ||
2115 | DESC_HDR_MODE1_MDEU_PAD | | ||
2116 | DESC_HDR_MODE1_MDEUB_SHA384_HMAC, | ||
2117 | }, | ||
2118 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
2119 | .alg.crypto = { | ||
2120 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | ||
2121 | .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos", | ||
2122 | .cra_blocksize = AES_BLOCK_SIZE, | ||
2123 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
2124 | .cra_type = &crypto_aead_type, | ||
2125 | .cra_aead = { | ||
2126 | .setkey = aead_setkey, | ||
2127 | .setauthsize = aead_setauthsize, | ||
2128 | .encrypt = aead_encrypt, | ||
2129 | .decrypt = aead_decrypt, | ||
2130 | .givencrypt = aead_givencrypt, | ||
2131 | .geniv = "<built-in>", | ||
2132 | .ivsize = AES_BLOCK_SIZE, | ||
2133 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
2134 | } | ||
2135 | }, | ||
2136 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
2137 | DESC_HDR_SEL0_AESU | | ||
2138 | DESC_HDR_MODE0_AESU_CBC | | ||
2139 | DESC_HDR_SEL1_MDEUB | | ||
2140 | DESC_HDR_MODE1_MDEU_INIT | | ||
2141 | DESC_HDR_MODE1_MDEU_PAD | | ||
2142 | DESC_HDR_MODE1_MDEUB_SHA512_HMAC, | ||
2143 | }, | ||
2144 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
2145 | .alg.crypto = { | ||
2146 | .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", | ||
2147 | .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos", | ||
2148 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
2149 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
2150 | .cra_type = &crypto_aead_type, | ||
2151 | .cra_aead = { | ||
2152 | .setkey = aead_setkey, | ||
2153 | .setauthsize = aead_setauthsize, | ||
2154 | .encrypt = aead_encrypt, | ||
2155 | .decrypt = aead_decrypt, | ||
2156 | .givencrypt = aead_givencrypt, | ||
2157 | .geniv = "<built-in>", | ||
2158 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
2159 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
2160 | } | ||
2161 | }, | ||
2162 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
2163 | DESC_HDR_SEL0_DEU | | ||
2164 | DESC_HDR_MODE0_DEU_CBC | | ||
2165 | DESC_HDR_MODE0_DEU_3DES | | ||
2166 | DESC_HDR_SEL1_MDEUB | | ||
2167 | DESC_HDR_MODE1_MDEU_INIT | | ||
2168 | DESC_HDR_MODE1_MDEU_PAD | | ||
2169 | DESC_HDR_MODE1_MDEUB_SHA512_HMAC, | ||
2170 | }, | ||
2171 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
2172 | .alg.crypto = { | ||
2124 | .cra_name = "authenc(hmac(md5),cbc(aes))", | 2173 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
2125 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", | 2174 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", |
2126 | .cra_blocksize = AES_BLOCK_SIZE, | 2175 | .cra_blocksize = AES_BLOCK_SIZE, |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 3c173954ef29..61a14054aa39 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -28,6 +28,123 @@ | |||
28 | * | 28 | * |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #define TALITOS_TIMEOUT 100000 | ||
32 | #define TALITOS_MAX_DATA_LEN 65535 | ||
33 | |||
34 | #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) | ||
35 | #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) | ||
36 | #define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf) | ||
37 | |||
38 | /* descriptor pointer entry */ | ||
39 | struct talitos_ptr { | ||
40 | __be16 len; /* length */ | ||
41 | u8 j_extent; /* jump to sg link table and/or extent */ | ||
42 | u8 eptr; /* extended address */ | ||
43 | __be32 ptr; /* address */ | ||
44 | }; | ||
45 | |||
46 | static const struct talitos_ptr zero_entry = { | ||
47 | .len = 0, | ||
48 | .j_extent = 0, | ||
49 | .eptr = 0, | ||
50 | .ptr = 0 | ||
51 | }; | ||
52 | |||
53 | /* descriptor */ | ||
54 | struct talitos_desc { | ||
55 | __be32 hdr; /* header high bits */ | ||
56 | __be32 hdr_lo; /* header low bits */ | ||
57 | struct talitos_ptr ptr[7]; /* ptr/len pair array */ | ||
58 | }; | ||
59 | |||
60 | /** | ||
61 | * talitos_request - descriptor submission request | ||
62 | * @desc: descriptor pointer (kernel virtual) | ||
63 | * @dma_desc: descriptor's physical bus address | ||
64 | * @callback: whom to call when descriptor processing is done | ||
65 | * @context: caller context (optional) | ||
66 | */ | ||
67 | struct talitos_request { | ||
68 | struct talitos_desc *desc; | ||
69 | dma_addr_t dma_desc; | ||
70 | void (*callback) (struct device *dev, struct talitos_desc *desc, | ||
71 | void *context, int error); | ||
72 | void *context; | ||
73 | }; | ||
74 | |||
75 | /* per-channel fifo management */ | ||
76 | struct talitos_channel { | ||
77 | void __iomem *reg; | ||
78 | |||
79 | /* request fifo */ | ||
80 | struct talitos_request *fifo; | ||
81 | |||
82 | /* number of requests pending in channel h/w fifo */ | ||
83 | atomic_t submit_count ____cacheline_aligned; | ||
84 | |||
85 | /* request submission (head) lock */ | ||
86 | spinlock_t head_lock ____cacheline_aligned; | ||
87 | /* index to next free descriptor request */ | ||
88 | int head; | ||
89 | |||
90 | /* request release (tail) lock */ | ||
91 | spinlock_t tail_lock ____cacheline_aligned; | ||
92 | /* index to next in-progress/done descriptor request */ | ||
93 | int tail; | ||
94 | }; | ||
95 | |||
96 | struct talitos_private { | ||
97 | struct device *dev; | ||
98 | struct platform_device *ofdev; | ||
99 | void __iomem *reg; | ||
100 | int irq[2]; | ||
101 | |||
102 | /* SEC global registers lock */ | ||
103 | spinlock_t reg_lock ____cacheline_aligned; | ||
104 | |||
105 | /* SEC version geometry (from device tree node) */ | ||
106 | unsigned int num_channels; | ||
107 | unsigned int chfifo_len; | ||
108 | unsigned int exec_units; | ||
109 | unsigned int desc_types; | ||
110 | |||
111 | /* SEC Compatibility info */ | ||
112 | unsigned long features; | ||
113 | |||
114 | /* | ||
115 | * length of the request fifo | ||
116 | * fifo_len is chfifo_len rounded up to next power of 2 | ||
117 | * so we can use bitwise ops to wrap | ||
118 | */ | ||
119 | unsigned int fifo_len; | ||
120 | |||
121 | struct talitos_channel *chan; | ||
122 | |||
123 | /* next channel to be assigned next incoming descriptor */ | ||
124 | atomic_t last_chan ____cacheline_aligned; | ||
125 | |||
126 | /* request callback tasklet */ | ||
127 | struct tasklet_struct done_task[2]; | ||
128 | |||
129 | /* list of registered algorithms */ | ||
130 | struct list_head alg_list; | ||
131 | |||
132 | /* hwrng device */ | ||
133 | struct hwrng rng; | ||
134 | }; | ||
135 | |||
136 | extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | ||
137 | void (*callback)(struct device *dev, | ||
138 | struct talitos_desc *desc, | ||
139 | void *context, int error), | ||
140 | void *context); | ||
141 | |||
142 | /* .features flag */ | ||
143 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | ||
144 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | ||
145 | #define TALITOS_FTR_SHA224_HWINIT 0x00000004 | ||
146 | #define TALITOS_FTR_HMAC_OK 0x00000008 | ||
147 | |||
31 | /* | 148 | /* |
32 | * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register | 149 | * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register |
33 | */ | 150 | */ |
@@ -209,6 +326,12 @@ | |||
209 | DESC_HDR_MODE1_MDEU_HMAC) | 326 | DESC_HDR_MODE1_MDEU_HMAC) |
210 | #define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \ | 327 | #define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \ |
211 | DESC_HDR_MODE1_MDEU_HMAC) | 328 | DESC_HDR_MODE1_MDEU_HMAC) |
329 | #define DESC_HDR_MODE1_MDEU_SHA224_HMAC (DESC_HDR_MODE1_MDEU_SHA224 | \ | ||
330 | DESC_HDR_MODE1_MDEU_HMAC) | ||
331 | #define DESC_HDR_MODE1_MDEUB_SHA384_HMAC (DESC_HDR_MODE1_MDEUB_SHA384 | \ | ||
332 | DESC_HDR_MODE1_MDEU_HMAC) | ||
333 | #define DESC_HDR_MODE1_MDEUB_SHA512_HMAC (DESC_HDR_MODE1_MDEUB_SHA512 | \ | ||
334 | DESC_HDR_MODE1_MDEU_HMAC) | ||
212 | 335 | ||
213 | /* direction of overall data flow (DIR) */ | 336 | /* direction of overall data flow (DIR) */ |
214 | #define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002) | 337 | #define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002) |
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c index 422a9766c7c9..ac236f6724f4 100644 --- a/drivers/crypto/tegra-aes.c +++ b/drivers/crypto/tegra-aes.c | |||
@@ -572,7 +572,7 @@ static void aes_workqueue_handler(struct work_struct *work) | |||
572 | struct tegra_aes_dev *dd = aes_dev; | 572 | struct tegra_aes_dev *dd = aes_dev; |
573 | int ret; | 573 | int ret; |
574 | 574 | ||
575 | ret = clk_enable(dd->aes_clk); | 575 | ret = clk_prepare_enable(dd->aes_clk); |
576 | if (ret) | 576 | if (ret) |
577 | BUG_ON("clock enable failed"); | 577 | BUG_ON("clock enable failed"); |
578 | 578 | ||
@@ -581,7 +581,7 @@ static void aes_workqueue_handler(struct work_struct *work) | |||
581 | ret = tegra_aes_handle_req(dd); | 581 | ret = tegra_aes_handle_req(dd); |
582 | } while (!ret); | 582 | } while (!ret); |
583 | 583 | ||
584 | clk_disable(dd->aes_clk); | 584 | clk_disable_unprepare(dd->aes_clk); |
585 | } | 585 | } |
586 | 586 | ||
587 | static irqreturn_t aes_irq(int irq, void *dev_id) | 587 | static irqreturn_t aes_irq(int irq, void *dev_id) |
@@ -673,7 +673,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata, | |||
673 | /* take mutex to access the aes hw */ | 673 | /* take mutex to access the aes hw */ |
674 | mutex_lock(&aes_lock); | 674 | mutex_lock(&aes_lock); |
675 | 675 | ||
676 | ret = clk_enable(dd->aes_clk); | 676 | ret = clk_prepare_enable(dd->aes_clk); |
677 | if (ret) | 677 | if (ret) |
678 | return ret; | 678 | return ret; |
679 | 679 | ||
@@ -700,7 +700,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata, | |||
700 | } | 700 | } |
701 | 701 | ||
702 | out: | 702 | out: |
703 | clk_disable(dd->aes_clk); | 703 | clk_disable_unprepare(dd->aes_clk); |
704 | mutex_unlock(&aes_lock); | 704 | mutex_unlock(&aes_lock); |
705 | 705 | ||
706 | dev_dbg(dd->dev, "%s: done\n", __func__); | 706 | dev_dbg(dd->dev, "%s: done\n", __func__); |
@@ -758,7 +758,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, | |||
758 | 758 | ||
759 | dd->flags = FLAGS_ENCRYPT | FLAGS_RNG; | 759 | dd->flags = FLAGS_ENCRYPT | FLAGS_RNG; |
760 | 760 | ||
761 | ret = clk_enable(dd->aes_clk); | 761 | ret = clk_prepare_enable(dd->aes_clk); |
762 | if (ret) | 762 | if (ret) |
763 | return ret; | 763 | return ret; |
764 | 764 | ||
@@ -788,7 +788,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, | |||
788 | memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); | 788 | memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); |
789 | 789 | ||
790 | out: | 790 | out: |
791 | clk_disable(dd->aes_clk); | 791 | clk_disable_unprepare(dd->aes_clk); |
792 | mutex_unlock(&aes_lock); | 792 | mutex_unlock(&aes_lock); |
793 | 793 | ||
794 | dev_dbg(dd->dev, "%s: done\n", __func__); | 794 | dev_dbg(dd->dev, "%s: done\n", __func__); |
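These tegra-aes hunks follow the common clock framework rule that a clock must be prepared before it can be enabled; the combined helpers keep the two steps balanced. The generic pattern, independent of this driver:

	ret = clk_prepare_enable(clk);	/* prepare + enable, may sleep */
	if (ret)
		return ret;

	/* ... touch the hardware ... */

	clk_disable_unprepare(clk);	/* balance both steps */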
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 7cac12793a4b..1c307e1b840c 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -1661,27 +1661,26 @@ static void ux500_cryp_shutdown(struct platform_device *pdev) | |||
1661 | 1661 | ||
1662 | } | 1662 | } |
1663 | 1663 | ||
1664 | static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state) | 1664 | static int ux500_cryp_suspend(struct device *dev) |
1665 | { | 1665 | { |
1666 | int ret; | 1666 | int ret; |
1667 | struct platform_device *pdev = to_platform_device(dev); | ||
1667 | struct cryp_device_data *device_data; | 1668 | struct cryp_device_data *device_data; |
1668 | struct resource *res_irq; | 1669 | struct resource *res_irq; |
1669 | struct cryp_ctx *temp_ctx = NULL; | 1670 | struct cryp_ctx *temp_ctx = NULL; |
1670 | 1671 | ||
1671 | dev_dbg(&pdev->dev, "[%s]", __func__); | 1672 | dev_dbg(dev, "[%s]", __func__); |
1672 | 1673 | ||
1673 | /* Handle state? */ | 1674 | /* Handle state? */ |
1674 | device_data = platform_get_drvdata(pdev); | 1675 | device_data = platform_get_drvdata(pdev); |
1675 | if (!device_data) { | 1676 | if (!device_data) { |
1676 | dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", | 1677 | dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); |
1677 | __func__); | ||
1678 | return -ENOMEM; | 1678 | return -ENOMEM; |
1679 | } | 1679 | } |
1680 | 1680 | ||
1681 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1681 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1682 | if (!res_irq) | 1682 | if (!res_irq) |
1683 | dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", | 1683 | dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__); |
1684 | __func__); | ||
1685 | else | 1684 | else |
1686 | disable_irq(res_irq->start); | 1685 | disable_irq(res_irq->start); |
1687 | 1686 | ||
@@ -1692,32 +1691,32 @@ static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state) | |||
1692 | 1691 | ||
1693 | if (device_data->current_ctx == ++temp_ctx) { | 1692 | if (device_data->current_ctx == ++temp_ctx) { |
1694 | if (down_interruptible(&driver_data.device_allocation)) | 1693 | if (down_interruptible(&driver_data.device_allocation)) |
1695 | dev_dbg(&pdev->dev, "[%s]: down_interruptible() " | 1694 | dev_dbg(dev, "[%s]: down_interruptible() failed", |
1696 | "failed", __func__); | 1695 | __func__); |
1697 | ret = cryp_disable_power(&pdev->dev, device_data, false); | 1696 | ret = cryp_disable_power(dev, device_data, false); |
1698 | 1697 | ||
1699 | } else | 1698 | } else |
1700 | ret = cryp_disable_power(&pdev->dev, device_data, true); | 1699 | ret = cryp_disable_power(dev, device_data, true); |
1701 | 1700 | ||
1702 | if (ret) | 1701 | if (ret) |
1703 | dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__); | 1702 | dev_err(dev, "[%s]: cryp_disable_power()", __func__); |
1704 | 1703 | ||
1705 | return ret; | 1704 | return ret; |
1706 | } | 1705 | } |
1707 | 1706 | ||
1708 | static int ux500_cryp_resume(struct platform_device *pdev) | 1707 | static int ux500_cryp_resume(struct device *dev) |
1709 | { | 1708 | { |
1710 | int ret = 0; | 1709 | int ret = 0; |
1710 | struct platform_device *pdev = to_platform_device(dev); | ||
1711 | struct cryp_device_data *device_data; | 1711 | struct cryp_device_data *device_data; |
1712 | struct resource *res_irq; | 1712 | struct resource *res_irq; |
1713 | struct cryp_ctx *temp_ctx = NULL; | 1713 | struct cryp_ctx *temp_ctx = NULL; |
1714 | 1714 | ||
1715 | dev_dbg(&pdev->dev, "[%s]", __func__); | 1715 | dev_dbg(dev, "[%s]", __func__); |
1716 | 1716 | ||
1717 | device_data = platform_get_drvdata(pdev); | 1717 | device_data = platform_get_drvdata(pdev); |
1718 | if (!device_data) { | 1718 | if (!device_data) { |
1719 | dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", | 1719 | dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); |
1720 | __func__); | ||
1721 | return -ENOMEM; | 1720 | return -ENOMEM; |
1722 | } | 1721 | } |
1723 | 1722 | ||
@@ -1730,11 +1729,10 @@ static int ux500_cryp_resume(struct platform_device *pdev) | |||
1730 | if (!device_data->current_ctx) | 1729 | if (!device_data->current_ctx) |
1731 | up(&driver_data.device_allocation); | 1730 | up(&driver_data.device_allocation); |
1732 | else | 1731 | else |
1733 | ret = cryp_enable_power(&pdev->dev, device_data, true); | 1732 | ret = cryp_enable_power(dev, device_data, true); |
1734 | 1733 | ||
1735 | if (ret) | 1734 | if (ret) |
1736 | dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!", | 1735 | dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); |
1737 | __func__); | ||
1738 | else { | 1736 | else { |
1739 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1737 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1740 | if (res_irq) | 1738 | if (res_irq) |
@@ -1744,15 +1742,16 @@ static int ux500_cryp_resume(struct platform_device *pdev) | |||
1744 | return ret; | 1742 | return ret; |
1745 | } | 1743 | } |
1746 | 1744 | ||
1745 | static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); | ||
1746 | |||
1747 | static struct platform_driver cryp_driver = { | 1747 | static struct platform_driver cryp_driver = { |
1748 | .probe = ux500_cryp_probe, | 1748 | .probe = ux500_cryp_probe, |
1749 | .remove = ux500_cryp_remove, | 1749 | .remove = ux500_cryp_remove, |
1750 | .shutdown = ux500_cryp_shutdown, | 1750 | .shutdown = ux500_cryp_shutdown, |
1751 | .suspend = ux500_cryp_suspend, | ||
1752 | .resume = ux500_cryp_resume, | ||
1753 | .driver = { | 1751 | .driver = { |
1754 | .owner = THIS_MODULE, | 1752 | .owner = THIS_MODULE, |
1755 | .name = "cryp1", | 1753
1754 | .pm = &ux500_cryp_pm, | ||
1756 | } | 1755 | } |
1757 | }; | 1756 | }; |
1758 | 1757 | ||
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 6dbb9ec709a3..08d5032cb564 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c | |||
@@ -1894,19 +1894,17 @@ static void ux500_hash_shutdown(struct platform_device *pdev) | |||
1894 | 1894 | ||
1895 | /** | 1895 | /** |
1896 | * ux500_hash_suspend - Function that suspends the hash device. | 1896 | * ux500_hash_suspend - Function that suspends the hash device. |
1897 | * @pdev: The platform device. | 1897 | * @dev: Device to suspend. |
1898 | * @state: - | ||
1899 | */ | 1898 | */ |
1900 | static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) | 1899 | static int ux500_hash_suspend(struct device *dev) |
1901 | { | 1900 | { |
1902 | int ret; | 1901 | int ret; |
1903 | struct hash_device_data *device_data; | 1902 | struct hash_device_data *device_data; |
1904 | struct hash_ctx *temp_ctx = NULL; | 1903 | struct hash_ctx *temp_ctx = NULL; |
1905 | 1904 | ||
1906 | device_data = platform_get_drvdata(pdev); | 1905 | device_data = dev_get_drvdata(dev); |
1907 | if (!device_data) { | 1906 | if (!device_data) { |
1908 | dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", | 1907 | dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); |
1909 | __func__); | ||
1910 | return -ENOMEM; | 1908 | return -ENOMEM; |
1911 | } | 1909 | } |
1912 | 1910 | ||
@@ -1917,33 +1915,32 @@ static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) | |||
1917 | 1915 | ||
1918 | if (device_data->current_ctx == ++temp_ctx) { | 1916 | if (device_data->current_ctx == ++temp_ctx) { |
1919 | if (down_interruptible(&driver_data.device_allocation)) | 1917 | if (down_interruptible(&driver_data.device_allocation)) |
1920 | dev_dbg(&pdev->dev, "[%s]: down_interruptible() " | 1918 | dev_dbg(dev, "[%s]: down_interruptible() failed", |
1921 | "failed", __func__); | 1919 | __func__); |
1922 | ret = hash_disable_power(device_data, false); | 1920 | ret = hash_disable_power(device_data, false); |
1923 | 1921 | ||
1924 | } else | 1922 | } else |
1925 | ret = hash_disable_power(device_data, true); | 1923 | ret = hash_disable_power(device_data, true); |
1926 | 1924 | ||
1927 | if (ret) | 1925 | if (ret) |
1928 | dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__); | 1926 | dev_err(dev, "[%s]: hash_disable_power()", __func__); |
1929 | 1927 | ||
1930 | return ret; | 1928 | return ret; |
1931 | } | 1929 | } |
1932 | 1930 | ||
1933 | /** | 1931 | /** |
1934 | * ux500_hash_resume - Function that resumes the hash device. | 1932 | * ux500_hash_resume - Function that resumes the hash device. |
1935 | * @pdev: The platform device. | 1933 | * @dev: Device to resume. |
1936 | */ | 1934 | */ |
1937 | static int ux500_hash_resume(struct platform_device *pdev) | 1935 | static int ux500_hash_resume(struct device *dev) |
1938 | { | 1936 | { |
1939 | int ret = 0; | 1937 | int ret = 0; |
1940 | struct hash_device_data *device_data; | 1938 | struct hash_device_data *device_data; |
1941 | struct hash_ctx *temp_ctx = NULL; | 1939 | struct hash_ctx *temp_ctx = NULL; |
1942 | 1940 | ||
1943 | device_data = platform_get_drvdata(pdev); | 1941 | device_data = dev_get_drvdata(dev); |
1944 | if (!device_data) { | 1942 | if (!device_data) { |
1945 | dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", | 1943 | dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); |
1946 | __func__); | ||
1947 | return -ENOMEM; | 1944 | return -ENOMEM; |
1948 | } | 1945 | } |
1949 | 1946 | ||
@@ -1958,21 +1955,21 @@ static int ux500_hash_resume(struct platform_device *pdev) | |||
1958 | ret = hash_enable_power(device_data, true); | 1955 | ret = hash_enable_power(device_data, true); |
1959 | 1956 | ||
1960 | if (ret) | 1957 | if (ret) |
1961 | dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!", | 1958 | dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); |
1962 | __func__); | ||
1963 | 1959 | ||
1964 | return ret; | 1960 | return ret; |
1965 | } | 1961 | } |
1966 | 1962 | ||
1963 | static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); | ||
1964 | |||
1967 | static struct platform_driver hash_driver = { | 1965 | static struct platform_driver hash_driver = { |
1968 | .probe = ux500_hash_probe, | 1966 | .probe = ux500_hash_probe, |
1969 | .remove = ux500_hash_remove, | 1967 | .remove = ux500_hash_remove, |
1970 | .shutdown = ux500_hash_shutdown, | 1968 | .shutdown = ux500_hash_shutdown, |
1971 | .suspend = ux500_hash_suspend, | ||
1972 | .resume = ux500_hash_resume, | ||
1973 | .driver = { | 1969 | .driver = { |
1974 | .owner = THIS_MODULE, | 1970 | .owner = THIS_MODULE, |
1975 | .name = "hash1", | 1971 | .name = "hash1", |
1972 | .pm = &ux500_hash_pm, | ||
1976 | } | 1973 | } |
1977 | }; | 1974 | }; |
1978 | 1975 | ||
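Both ux500 drivers are converted the same way: the legacy platform suspend/resume hooks (which took a platform_device and, for suspend, a pm_message_t) become dev_pm_ops callbacks taking only a struct device, and SIMPLE_DEV_PM_OPS() builds the ops table wired into driver.pm. The generic shape of the conversion, with hypothetical names:

	static int foo_suspend(struct device *dev)
	{
		/* quiesce the device */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* bring the device back up */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.pm   = &foo_pm,
		},
	};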