author    Lars Persson <lars.persson@axis.com>      2017-08-10 08:53:53 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>  2017-08-22 02:54:52 -0400
commit    a21eb94fc4d3c6472de53bd30a543ec06eaf8914 (patch)
tree      e2ee7afe07af59f4f14b6fcc8530bdfdb040ca27
parent    6f7473c524cc4f875dcd9397ec9a6ec039bd08b6 (diff)
crypto: axis - add ARTPEC-6/7 crypto accelerator driver
This is an asynchronous crypto API driver for the accelerator present
in the ARTPEC-6 and -7 SoCs from Axis Communications AB.
The driver supports AES in ECB/CTR/CBC/XTS/GCM modes and the SHA-1/SHA-2
hash algorithms.
Signed-off-by: Lars Persson <larper@axis.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--   drivers/crypto/Kconfig               |   21
-rw-r--r--   drivers/crypto/Makefile              |    1
-rw-r--r--   drivers/crypto/axis/Makefile         |    1
-rw-r--r--   drivers/crypto/axis/artpec6_crypto.c | 3192
4 files changed, 3215 insertions, 0 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b5393f1b87a..fe33c199fc1a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -708,4 +708,25 @@ config CRYPTO_DEV_SAFEXCEL
708 | chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash | 708 | chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash |
709 | algorithms. | 709 | algorithms. |
710 | 710 | ||
711 | config CRYPTO_DEV_ARTPEC6 | ||
712 | tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration." | ||
713 | depends on ARM && (ARCH_ARTPEC || COMPILE_TEST) | ||
714 | depends on HAS_DMA | ||
715 | depends on OF | ||
716 | select CRYPTO_AEAD | ||
717 | select CRYPTO_AES | ||
718 | select CRYPTO_ALGAPI | ||
719 | select CRYPTO_BLKCIPHER | ||
720 | select CRYPTO_CTR | ||
721 | select CRYPTO_HASH | ||
722 | select CRYPTO_SHA1 | ||
723 | select CRYPTO_SHA256 | ||
724 | select CRYPTO_SHA384 | ||
725 | select CRYPTO_SHA512 | ||
726 | help | ||
727 | Enables the driver for the on-chip crypto accelerator | ||
728 | of Axis ARTPEC SoCs. | ||
729 | |||
730 | To compile this driver as a module, choose M here. | ||
731 | |||
711 | endif # CRYPTO_HW | 732 | endif # CRYPTO_HW |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index b12eb3c99430..808432b44c6b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
44 | obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ | 44 | obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ |
45 | obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ | 45 | obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ |
46 | obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ | 46 | obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ |
47 | obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ | ||
diff --git a/drivers/crypto/axis/Makefile b/drivers/crypto/axis/Makefile
new file mode 100644
index 000000000000..be9a84a4b667
--- /dev/null
+++ b/drivers/crypto/axis/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) := artpec6_crypto.o
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
new file mode 100644
index 000000000000..d9fbbf01062b
--- /dev/null
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -0,0 +1,3192 @@
1 | /* | ||
2 | * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api. | ||
3 | * | ||
4 | * Copyright (C) 2014-2017 Axis Communications AB | ||
5 | */ | ||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
7 | |||
8 | #include <linux/bitfield.h> | ||
9 | #include <linux/crypto.h> | ||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/delay.h> | ||
12 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/fault-inject.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/of.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/scatterlist.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #include <crypto/aes.h> | ||
25 | #include <crypto/internal/aead.h> | ||
26 | #include <crypto/internal/hash.h> | ||
27 | #include <crypto/internal/skcipher.h> | ||
28 | #include <crypto/scatterwalk.h> | ||
29 | #include <crypto/sha.h> | ||
30 | #include <crypto/xts.h> | ||
31 | |||
32 | /* Max length of a line in all cache levels for Artpec SoCs. */ | ||
33 | #define ARTPEC_CACHE_LINE_MAX 32 | ||
34 | |||
35 | #define PDMA_OUT_CFG 0x0000 | ||
36 | #define PDMA_OUT_BUF_CFG 0x0004 | ||
37 | #define PDMA_OUT_CMD 0x0008 | ||
38 | #define PDMA_OUT_DESCRQ_PUSH 0x0010 | ||
39 | #define PDMA_OUT_DESCRQ_STAT 0x0014 | ||
40 | |||
41 | #define A6_PDMA_IN_CFG 0x0028 | ||
42 | #define A6_PDMA_IN_BUF_CFG 0x002c | ||
43 | #define A6_PDMA_IN_CMD 0x0030 | ||
44 | #define A6_PDMA_IN_STATQ_PUSH 0x0038 | ||
45 | #define A6_PDMA_IN_DESCRQ_PUSH 0x0044 | ||
46 | #define A6_PDMA_IN_DESCRQ_STAT 0x0048 | ||
47 | #define A6_PDMA_INTR_MASK 0x0068 | ||
48 | #define A6_PDMA_ACK_INTR 0x006c | ||
49 | #define A6_PDMA_MASKED_INTR 0x0074 | ||
50 | |||
51 | #define A7_PDMA_IN_CFG 0x002c | ||
52 | #define A7_PDMA_IN_BUF_CFG 0x0030 | ||
53 | #define A7_PDMA_IN_CMD 0x0034 | ||
54 | #define A7_PDMA_IN_STATQ_PUSH 0x003c | ||
55 | #define A7_PDMA_IN_DESCRQ_PUSH 0x0048 | ||
56 | #define A7_PDMA_IN_DESCRQ_STAT 0x004C | ||
57 | #define A7_PDMA_INTR_MASK 0x006c | ||
58 | #define A7_PDMA_ACK_INTR 0x0070 | ||
59 | #define A7_PDMA_MASKED_INTR 0x0078 | ||
60 | |||
61 | #define PDMA_OUT_CFG_EN BIT(0) | ||
62 | |||
63 | #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0) | ||
64 | #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5) | ||
65 | |||
66 | #define PDMA_OUT_CMD_START BIT(0) | ||
67 | #define A6_PDMA_OUT_CMD_STOP BIT(3) | ||
68 | #define A7_PDMA_OUT_CMD_STOP BIT(2) | ||
69 | |||
70 | #define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0) | ||
71 | #define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6) | ||
72 | |||
73 | #define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0) | ||
74 | #define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4) | ||
75 | |||
76 | #define PDMA_IN_CFG_EN BIT(0) | ||
77 | |||
78 | #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0) | ||
79 | #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5) | ||
80 | #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10) | ||
81 | |||
82 | #define PDMA_IN_CMD_START BIT(0) | ||
83 | #define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2) | ||
84 | #define A6_PDMA_IN_CMD_STOP BIT(3) | ||
85 | #define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1) | ||
86 | #define A7_PDMA_IN_CMD_STOP BIT(2) | ||
87 | |||
88 | #define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0) | ||
89 | #define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6) | ||
90 | |||
91 | #define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0) | ||
92 | #define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6) | ||
93 | |||
94 | #define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0) | ||
95 | #define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4) | ||
96 | |||
97 | #define A6_PDMA_INTR_MASK_IN_DATA BIT(2) | ||
98 | #define A6_PDMA_INTR_MASK_IN_EOP BIT(3) | ||
99 | #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4) | ||
100 | |||
101 | #define A7_PDMA_INTR_MASK_IN_DATA BIT(3) | ||
102 | #define A7_PDMA_INTR_MASK_IN_EOP BIT(4) | ||
103 | #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5) | ||
104 | |||
105 | #define A6_CRY_MD_OPER GENMASK(19, 16) | ||
106 | |||
107 | #define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20) | ||
108 | #define A6_CRY_MD_HASH_HMAC_FIN BIT(23) | ||
109 | |||
110 | #define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20) | ||
111 | #define A6_CRY_MD_CIPHER_DECR BIT(22) | ||
112 | #define A6_CRY_MD_CIPHER_TWEAK BIT(23) | ||
113 | #define A6_CRY_MD_CIPHER_DSEQ BIT(24) | ||
114 | |||
115 | #define A7_CRY_MD_OPER GENMASK(11, 8) | ||
116 | |||
117 | #define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12) | ||
118 | #define A7_CRY_MD_HASH_HMAC_FIN BIT(15) | ||
119 | |||
120 | #define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12) | ||
121 | #define A7_CRY_MD_CIPHER_DECR BIT(14) | ||
122 | #define A7_CRY_MD_CIPHER_TWEAK BIT(15) | ||
123 | #define A7_CRY_MD_CIPHER_DSEQ BIT(16) | ||
124 | |||
125 | /* DMA metadata constants */ | ||
126 | #define regk_crypto_aes_cbc 0x00000002 | ||
127 | #define regk_crypto_aes_ctr 0x00000003 | ||
128 | #define regk_crypto_aes_ecb 0x00000001 | ||
129 | #define regk_crypto_aes_gcm 0x00000004 | ||
130 | #define regk_crypto_aes_xts 0x00000005 | ||
131 | #define regk_crypto_cache 0x00000002 | ||
132 | #define a6_regk_crypto_dlkey 0x0000000a | ||
133 | #define a7_regk_crypto_dlkey 0x0000000e | ||
134 | #define regk_crypto_ext 0x00000001 | ||
135 | #define regk_crypto_hmac_sha1 0x00000007 | ||
136 | #define regk_crypto_hmac_sha256 0x00000009 | ||
137 | #define regk_crypto_hmac_sha384 0x0000000b | ||
138 | #define regk_crypto_hmac_sha512 0x0000000d | ||
139 | #define regk_crypto_init 0x00000000 | ||
140 | #define regk_crypto_key_128 0x00000000 | ||
141 | #define regk_crypto_key_192 0x00000001 | ||
142 | #define regk_crypto_key_256 0x00000002 | ||
143 | #define regk_crypto_null 0x00000000 | ||
144 | #define regk_crypto_sha1 0x00000006 | ||
145 | #define regk_crypto_sha256 0x00000008 | ||
146 | #define regk_crypto_sha384 0x0000000a | ||
147 | #define regk_crypto_sha512 0x0000000c | ||
148 | |||
149 | /* DMA descriptor structures */ | ||
150 | struct pdma_descr_ctrl { | ||
151 | unsigned char short_descr : 1; | ||
152 | unsigned char pad1 : 1; | ||
153 | unsigned char eop : 1; | ||
154 | unsigned char intr : 1; | ||
155 | unsigned char short_len : 3; | ||
156 | unsigned char pad2 : 1; | ||
157 | } __packed; | ||
158 | |||
159 | struct pdma_data_descr { | ||
160 | unsigned int len : 24; | ||
161 | unsigned int buf : 32; | ||
162 | } __packed; | ||
163 | |||
164 | struct pdma_short_descr { | ||
165 | unsigned char data[7]; | ||
166 | } __packed; | ||
167 | |||
168 | struct pdma_descr { | ||
169 | struct pdma_descr_ctrl ctrl; | ||
170 | union { | ||
171 | struct pdma_data_descr data; | ||
172 | struct pdma_short_descr shrt; | ||
173 | }; | ||
174 | }; | ||
175 | |||
176 | struct pdma_stat_descr { | ||
177 | unsigned char pad1 : 1; | ||
178 | unsigned char pad2 : 1; | ||
179 | unsigned char eop : 1; | ||
180 | unsigned char pad3 : 5; | ||
181 | unsigned int len : 24; | ||
182 | }; | ||
183 | |||
184 | /* Each descriptor array can hold max 64 entries */ | ||
185 | #define PDMA_DESCR_COUNT 64 | ||
186 | |||
187 | #define MODULE_NAME "Artpec-6 CA" | ||
188 | |||
189 | /* Hash modes (including HMAC variants) */ | ||
190 | #define ARTPEC6_CRYPTO_HASH_SHA1 1 | ||
191 | #define ARTPEC6_CRYPTO_HASH_SHA256 2 | ||
192 | #define ARTPEC6_CRYPTO_HASH_SHA384 3 | ||
193 | #define ARTPEC6_CRYPTO_HASH_SHA512 4 | ||
194 | |||
195 | /* Crypto modes */ | ||
196 | #define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1 | ||
197 | #define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2 | ||
198 | #define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3 | ||
199 | #define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5 | ||
200 | |||
201 | /* The PDMA is a DMA engine tightly coupled with a ciphering engine. | ||
202 | * It operates on a descriptor array with up to 64 descriptor entries. | ||
203 | * The arrays must be 64-byte aligned in memory. | ||
204 | * | ||
205 | * The ciphering unit has no registers and is completely controlled by a | ||
206 | * 4-byte metadata word inserted at the beginning of each DMA packet. | ||
207 | * | ||
208 | * A DMA packet is a sequence of descriptors terminated by setting the .eop | ||
209 | * field in the final descriptor of the packet. | ||
210 | * | ||
211 | * Multiple packets are used for providing context data, key data and | ||
212 | * the plain/ciphertext. | ||
213 | * | ||
214 | * PDMA Descriptors (Array) | ||
215 | * +------+------+------+~~+-------+------+---- | ||
216 | * | 0 | 1 | 2 |~~| 11 EOP| 12 | .... | ||
217 | * +--+---+--+---+----+-+~~+-------+----+-+---- | ||
218 | * | | | | | | ||
219 | * | | | | | | ||
220 | * __|__ +-------++-------++-------+ +----+ | ||
221 | * | MD | |Payload||Payload||Payload| | MD | | ||
222 | * +-----+ +-------++-------++-------+ +----+ | ||
223 | */ | ||
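A minimal sketch of the layout described above (the helper name is hypothetical and not part of the driver): a two-descriptor packet with the 4-byte metadata word in a short descriptor and a payload descriptor carrying the EOP flag that closes the packet.

	static void example_build_packet(struct pdma_descr *arr, u32 md,
					 dma_addr_t payload, size_t len)
	{
		memset(arr, 0, 2 * sizeof(*arr));

		/* Descriptor 0: metadata embedded directly in the descriptor */
		arr[0].ctrl.short_descr = 1;
		arr[0].ctrl.short_len = sizeof(md);
		memcpy(arr[0].shrt.data, &md, sizeof(md));

		/* Descriptor 1: payload by bus address; EOP terminates the packet */
		arr[1].data.len = len;
		arr[1].data.buf = payload;
		arr[1].ctrl.eop = 1;
	}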
224 | |||
225 | struct artpec6_crypto_bounce_buffer { | ||
226 | struct list_head list; | ||
227 | size_t length; | ||
228 | struct scatterlist *sg; | ||
229 | size_t offset; | ||
230 | /* buf is aligned to ARTPEC_CACHE_LINE_MAX and | ||
231 | * holds up to ARTPEC_CACHE_LINE_MAX bytes of data. | ||
232 | */ | ||
233 | void *buf; | ||
234 | }; | ||
235 | |||
236 | struct artpec6_crypto_dma_map { | ||
237 | dma_addr_t dma_addr; | ||
238 | size_t size; | ||
239 | enum dma_data_direction dir; | ||
240 | }; | ||
241 | |||
242 | struct artpec6_crypto_dma_descriptors { | ||
243 | struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64); | ||
244 | struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64); | ||
245 | u32 stat[PDMA_DESCR_COUNT] __aligned(64); | ||
246 | struct list_head bounce_buffers; | ||
247 | /* Enough maps for all out/in buffers, and all three descr. arrays */ | ||
248 | struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2]; | ||
249 | dma_addr_t out_dma_addr; | ||
250 | dma_addr_t in_dma_addr; | ||
251 | dma_addr_t stat_dma_addr; | ||
252 | size_t out_cnt; | ||
253 | size_t in_cnt; | ||
254 | size_t map_count; | ||
255 | }; | ||
256 | |||
257 | enum artpec6_crypto_variant { | ||
258 | ARTPEC6_CRYPTO, | ||
259 | ARTPEC7_CRYPTO, | ||
260 | }; | ||
261 | |||
262 | struct artpec6_crypto { | ||
263 | void __iomem *base; | ||
264 | spinlock_t queue_lock; | ||
265 | struct list_head queue; /* waiting for pdma fifo space */ | ||
266 | struct list_head pending; /* submitted to pdma fifo */ | ||
267 | struct tasklet_struct task; | ||
268 | struct kmem_cache *dma_cache; | ||
269 | int pending_count; | ||
270 | struct timer_list timer; | ||
271 | enum artpec6_crypto_variant variant; | ||
272 | void *pad_buffer; /* cache-aligned block padding buffer */ | ||
273 | void *zero_buffer; | ||
274 | }; | ||
275 | |||
276 | enum artpec6_crypto_hash_flags { | ||
277 | HASH_FLAG_INIT_CTX = 2, | ||
278 | HASH_FLAG_UPDATE = 4, | ||
279 | HASH_FLAG_FINALIZE = 8, | ||
280 | HASH_FLAG_HMAC = 16, | ||
281 | HASH_FLAG_UPDATE_KEY = 32, | ||
282 | }; | ||
283 | |||
284 | struct artpec6_crypto_req_common { | ||
285 | struct list_head list; | ||
286 | struct artpec6_crypto_dma_descriptors *dma; | ||
287 | struct crypto_async_request *req; | ||
288 | void (*complete)(struct crypto_async_request *req); | ||
289 | gfp_t gfp_flags; | ||
290 | }; | ||
291 | |||
292 | struct artpec6_hash_request_context { | ||
293 | char partial_buffer[SHA512_BLOCK_SIZE]; | ||
294 | char partial_buffer_out[SHA512_BLOCK_SIZE]; | ||
295 | char key_buffer[SHA512_BLOCK_SIZE]; | ||
296 | char pad_buffer[SHA512_BLOCK_SIZE + 32]; | ||
297 | unsigned char digeststate[SHA512_DIGEST_SIZE]; | ||
298 | size_t partial_bytes; | ||
299 | u64 digcnt; | ||
300 | u32 key_md; | ||
301 | u32 hash_md; | ||
302 | enum artpec6_crypto_hash_flags hash_flags; | ||
303 | struct artpec6_crypto_req_common common; | ||
304 | }; | ||
305 | |||
306 | struct artpec6_hash_export_state { | ||
307 | char partial_buffer[SHA512_BLOCK_SIZE]; | ||
308 | unsigned char digeststate[SHA512_DIGEST_SIZE]; | ||
309 | size_t partial_bytes; | ||
310 | u64 digcnt; | ||
311 | int oper; | ||
312 | unsigned int hash_flags; | ||
313 | }; | ||
314 | |||
315 | struct artpec6_hashalg_context { | ||
316 | char hmac_key[SHA512_BLOCK_SIZE]; | ||
317 | size_t hmac_key_length; | ||
318 | struct crypto_shash *child_hash; | ||
319 | }; | ||
320 | |||
321 | struct artpec6_crypto_request_context { | ||
322 | u32 cipher_md; | ||
323 | bool decrypt; | ||
324 | struct artpec6_crypto_req_common common; | ||
325 | }; | ||
326 | |||
327 | struct artpec6_cryptotfm_context { | ||
328 | unsigned char aes_key[2*AES_MAX_KEY_SIZE]; | ||
329 | size_t key_length; | ||
330 | u32 key_md; | ||
331 | int crypto_type; | ||
332 | struct crypto_skcipher *fallback; | ||
333 | }; | ||
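Note that aes_key is sized 2*AES_MAX_KEY_SIZE because an XTS key is a concatenation of two AES keys; key_length then counts the full concatenation.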
334 | |||
335 | struct artpec6_crypto_aead_hw_ctx { | ||
336 | __be64 aad_length_bits; | ||
337 | __be64 text_length_bits; | ||
338 | __u8 J0[AES_BLOCK_SIZE]; | ||
339 | }; | ||
340 | |||
341 | struct artpec6_crypto_aead_req_ctx { | ||
342 | struct artpec6_crypto_aead_hw_ctx hw_ctx; | ||
343 | u32 cipher_md; | ||
344 | bool decrypt; | ||
345 | struct artpec6_crypto_req_common common; | ||
346 | __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned; | ||
347 | }; | ||
348 | |||
349 | /* The crypto framework makes it hard to avoid this global. */ | ||
350 | static struct device *artpec6_crypto_dev; | ||
351 | |||
352 | static struct dentry *dbgfs_root; | ||
353 | |||
354 | #ifdef CONFIG_FAULT_INJECTION | ||
355 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); | ||
356 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); | ||
357 | #endif | ||
358 | |||
359 | enum { | ||
360 | ARTPEC6_CRYPTO_PREPARE_HASH_NO_START, | ||
361 | ARTPEC6_CRYPTO_PREPARE_HASH_START, | ||
362 | }; | ||
363 | |||
364 | static int artpec6_crypto_prepare_aead(struct aead_request *areq); | ||
365 | static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq); | ||
366 | static int artpec6_crypto_prepare_hash(struct ahash_request *areq); | ||
367 | |||
368 | static void | ||
369 | artpec6_crypto_complete_crypto(struct crypto_async_request *req); | ||
370 | static void | ||
371 | artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req); | ||
372 | static void | ||
373 | artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req); | ||
374 | static void | ||
375 | artpec6_crypto_complete_aead(struct crypto_async_request *req); | ||
376 | static void | ||
377 | artpec6_crypto_complete_hash(struct crypto_async_request *req); | ||
378 | |||
379 | static int | ||
380 | artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common); | ||
381 | |||
382 | static void | ||
383 | artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common); | ||
384 | |||
385 | struct artpec6_crypto_walk { | ||
386 | struct scatterlist *sg; | ||
387 | size_t offset; | ||
388 | }; | ||
389 | |||
390 | static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk, | ||
391 | struct scatterlist *sg) | ||
392 | { | ||
393 | awalk->sg = sg; | ||
394 | awalk->offset = 0; | ||
395 | } | ||
396 | |||
397 | static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk, | ||
398 | size_t nbytes) | ||
399 | { | ||
400 | while (nbytes && awalk->sg) { | ||
401 | size_t piece; | ||
402 | |||
403 | WARN_ON(awalk->offset > awalk->sg->length); | ||
404 | |||
405 | piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset); | ||
406 | nbytes -= piece; | ||
407 | awalk->offset += piece; | ||
408 | if (awalk->offset == awalk->sg->length) { | ||
409 | awalk->sg = sg_next(awalk->sg); | ||
410 | awalk->offset = 0; | ||
411 | } | ||
412 | |||
413 | } | ||
414 | |||
415 | return nbytes; | ||
416 | } | ||
417 | |||
418 | static size_t | ||
419 | artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk) | ||
420 | { | ||
421 | WARN_ON(awalk->sg->length == awalk->offset); | ||
422 | |||
423 | return awalk->sg->length - awalk->offset; | ||
424 | } | ||
425 | |||
426 | static dma_addr_t | ||
427 | artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk) | ||
428 | { | ||
429 | return sg_phys(awalk->sg) + awalk->offset; | ||
430 | } | ||
431 | |||
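A minimal usage sketch (hypothetical helper, assuming a valid scatterlist) of how the three walk helpers combine to consume a scatterlist in contiguous pieces, mirroring the descriptor setup loops further down:

	static void example_walk(struct scatterlist *sg, size_t count)
	{
		struct artpec6_crypto_walk awalk;

		artpec6_crypto_walk_init(&awalk, sg);
		while (awalk.sg && count) {
			size_t chunk = min(count,
					   artpec6_crypto_walk_chunklen(&awalk));
			dma_addr_t addr = artpec6_crypto_walk_chunk_phys(&awalk);

			pr_debug("chunk %pad:%zu\n", &addr, chunk);
			/* ... set up a descriptor for [addr, addr + chunk) ... */
			count -= chunk;
			artpec6_crypto_walk_advance(&awalk, chunk);
		}
	}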
432 | static void | ||
433 | artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common) | ||
434 | { | ||
435 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
436 | struct artpec6_crypto_bounce_buffer *b; | ||
437 | struct artpec6_crypto_bounce_buffer *next; | ||
438 | |||
439 | list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) { | ||
440 | pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n", | ||
441 | b, b->length, b->offset, b->buf); | ||
442 | sg_pcopy_from_buffer(b->sg, | ||
443 | 1, | ||
444 | b->buf, | ||
445 | b->length, | ||
446 | b->offset); | ||
447 | |||
448 | list_del(&b->list); | ||
449 | kfree(b); | ||
450 | } | ||
451 | } | ||
452 | |||
453 | static inline bool artpec6_crypto_busy(void) | ||
454 | { | ||
455 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
456 | int fifo_count = ac->pending_count; | ||
457 | |||
458 | return fifo_count > 6; | ||
459 | } | ||
460 | |||
461 | static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req) | ||
462 | { | ||
463 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
464 | int ret = -EBUSY; | ||
465 | |||
466 | spin_lock_bh(&ac->queue_lock); | ||
467 | |||
468 | if (!artpec6_crypto_busy()) { | ||
469 | list_add_tail(&req->list, &ac->pending); | ||
470 | artpec6_crypto_start_dma(req); | ||
471 | ret = -EINPROGRESS; | ||
472 | } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) { | ||
473 | list_add_tail(&req->list, &ac->queue); | ||
474 | } else { | ||
475 | artpec6_crypto_common_destroy(req); | ||
476 | } | ||
477 | |||
478 | spin_unlock_bh(&ac->queue_lock); | ||
479 | |||
480 | return ret; | ||
481 | } | ||
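The return protocol follows the asynchronous crypto API: -EINPROGRESS means the request was pushed to the hardware, while -EBUSY means the FIFO was full. In the -EBUSY case the request is kept on the backlog queue only if the caller set CRYPTO_TFM_REQ_MAY_BACKLOG; otherwise its resources are freed and the caller must retry.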
482 | |||
483 | static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common) | ||
484 | { | ||
485 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
486 | enum artpec6_crypto_variant variant = ac->variant; | ||
487 | void __iomem *base = ac->base; | ||
488 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
489 | u32 ind, statd, outd; | ||
490 | |||
491 | /* Make descriptor content visible to the DMA before starting it. */ | ||
492 | wmb(); | ||
493 | |||
494 | ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) | | ||
495 | FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6); | ||
496 | |||
497 | statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) | | ||
498 | FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6); | ||
499 | |||
500 | outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) | | ||
501 | FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6); | ||
502 | |||
503 | if (variant == ARTPEC6_CRYPTO) { | ||
504 | writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH); | ||
505 | writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH); | ||
506 | writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD); | ||
507 | } else { | ||
508 | writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH); | ||
509 | writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH); | ||
510 | writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD); | ||
511 | } | ||
512 | |||
513 | writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH); | ||
514 | writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD); | ||
515 | |||
516 | ac->pending_count++; | ||
517 | } | ||
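Each queue push word packs the descriptor count minus one into the LEN field and the bus address shifted right by six bits into the ADDR field; the 64-byte alignment of the descriptor arrays guarantees that no address bits are lost by the shift.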
518 | |||
519 | static void | ||
520 | artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common) | ||
521 | { | ||
522 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
523 | |||
524 | dma->out_cnt = 0; | ||
525 | dma->in_cnt = 0; | ||
526 | dma->map_count = 0; | ||
527 | INIT_LIST_HEAD(&dma->bounce_buffers); | ||
528 | } | ||
529 | |||
530 | static bool fault_inject_dma_descr(void) | ||
531 | { | ||
532 | #ifdef CONFIG_FAULT_INJECTION | ||
533 | return should_fail(&artpec6_crypto_fail_dma_array_full, 1); | ||
534 | #else | ||
535 | return false; | ||
536 | #endif | ||
537 | } | ||
538 | |||
539 | /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a | ||
540 | * physical address | ||
541 | * | ||
542 | * @addr: The physical address of the data buffer | ||
543 | * @len: The length of the data buffer | ||
544 | * @eop: True if this is the last buffer in the packet | ||
545 | * | ||
546 | * @return 0 on success or -ENOSPC if there are no more descriptors available | ||
547 | */ | ||
548 | static int | ||
549 | artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common, | ||
550 | dma_addr_t addr, size_t len, bool eop) | ||
551 | { | ||
552 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
553 | struct pdma_descr *d; | ||
554 | |||
555 | if (dma->out_cnt >= PDMA_DESCR_COUNT || | ||
556 | fault_inject_dma_descr()) { | ||
557 | pr_err("No free OUT DMA descriptors available!\n"); | ||
558 | return -ENOSPC; | ||
559 | } | ||
560 | |||
561 | d = &dma->out[dma->out_cnt++]; | ||
562 | memset(d, 0, sizeof(*d)); | ||
563 | |||
564 | d->ctrl.short_descr = 0; | ||
565 | d->ctrl.eop = eop; | ||
566 | d->data.len = len; | ||
567 | d->data.buf = addr; | ||
568 | return 0; | ||
569 | } | ||
570 | |||
571 | /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor | ||
572 | * | ||
573 | * @dst: The virtual address of the data | ||
574 | * @len: The length of the data, must be between 1 and 7 bytes | ||
575 | * @eop: True if this is the last buffer in the packet | ||
576 | * | ||
577 | * @return 0 on success | ||
578 | * -ENOSPC if no more descriptors are available | ||
579 | * -EINVAL if the data length is not between 1 and 7 bytes | ||
580 | */ | ||
581 | static int | ||
582 | artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common, | ||
583 | void *dst, unsigned int len, bool eop) | ||
584 | { | ||
585 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
586 | struct pdma_descr *d; | ||
587 | |||
588 | if (dma->out_cnt >= PDMA_DESCR_COUNT || | ||
589 | fault_inject_dma_descr()) { | ||
590 | pr_err("No free OUT DMA descriptors available!\n"); | ||
591 | return -ENOSPC; | ||
592 | } else if (len > 7 || len < 1) { | ||
593 | return -EINVAL; | ||
594 | } | ||
595 | d = &dma->out[dma->out_cnt++]; | ||
596 | memset(d, 0, sizeof(*d)); | ||
597 | |||
598 | d->ctrl.short_descr = 1; | ||
599 | d->ctrl.short_len = len; | ||
600 | d->ctrl.eop = eop; | ||
601 | memcpy(d->shrt.data, dst, len); | ||
602 | return 0; | ||
603 | } | ||
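Short descriptors embed up to seven bytes of payload directly in the descriptor ring, which avoids a DMA mapping for tiny fragments such as the 4-byte metadata words that precede every packet.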
604 | |||
605 | static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common, | ||
606 | struct page *page, size_t offset, | ||
607 | size_t size, | ||
608 | enum dma_data_direction dir, | ||
609 | dma_addr_t *dma_addr_out) | ||
610 | { | ||
611 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
612 | struct device *dev = artpec6_crypto_dev; | ||
613 | struct artpec6_crypto_dma_map *map; | ||
614 | dma_addr_t dma_addr; | ||
615 | |||
616 | *dma_addr_out = 0; | ||
617 | |||
618 | if (dma->map_count >= ARRAY_SIZE(dma->maps)) | ||
619 | return -ENOMEM; | ||
620 | |||
621 | dma_addr = dma_map_page(dev, page, offset, size, dir); | ||
622 | if (dma_mapping_error(dev, dma_addr)) | ||
623 | return -ENOMEM; | ||
624 | |||
625 | map = &dma->maps[dma->map_count++]; | ||
626 | map->size = size; | ||
627 | map->dma_addr = dma_addr; | ||
628 | map->dir = dir; | ||
629 | |||
630 | *dma_addr_out = dma_addr; | ||
631 | |||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | static int | ||
636 | artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common, | ||
637 | void *ptr, size_t size, | ||
638 | enum dma_data_direction dir, | ||
639 | dma_addr_t *dma_addr_out) | ||
640 | { | ||
641 | struct page *page = virt_to_page(ptr); | ||
642 | size_t offset = (uintptr_t)ptr & ~PAGE_MASK; | ||
643 | |||
644 | return artpec6_crypto_dma_map_page(common, page, offset, size, dir, | ||
645 | dma_addr_out); | ||
646 | } | ||
647 | |||
648 | static int | ||
649 | artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common) | ||
650 | { | ||
651 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
652 | int ret; | ||
653 | |||
654 | ret = artpec6_crypto_dma_map_single(common, dma->in, | ||
655 | sizeof(dma->in[0]) * dma->in_cnt, | ||
656 | DMA_TO_DEVICE, &dma->in_dma_addr); | ||
657 | if (ret) | ||
658 | return ret; | ||
659 | |||
660 | ret = artpec6_crypto_dma_map_single(common, dma->out, | ||
661 | sizeof(dma->out[0]) * dma->out_cnt, | ||
662 | DMA_TO_DEVICE, &dma->out_dma_addr); | ||
663 | if (ret) | ||
664 | return ret; | ||
665 | |||
666 | /* We only read one stat descriptor */ | ||
667 | dma->stat[dma->in_cnt - 1] = 0; | ||
668 | |||
669 | /* | ||
670 | * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor | ||
671 | * to be written. | ||
672 | */ | ||
673 | return artpec6_crypto_dma_map_single(common, | ||
674 | dma->stat + dma->in_cnt - 1, | ||
675 | sizeof(dma->stat[0]), | ||
676 | DMA_BIDIRECTIONAL, | ||
677 | &dma->stat_dma_addr); | ||
678 | } | ||
679 | |||
680 | static void | ||
681 | artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common) | ||
682 | { | ||
683 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
684 | struct device *dev = artpec6_crypto_dev; | ||
685 | int i; | ||
686 | |||
687 | for (i = 0; i < dma->map_count; i++) { | ||
688 | struct artpec6_crypto_dma_map *map = &dma->maps[i]; | ||
689 | |||
690 | dma_unmap_page(dev, map->dma_addr, map->size, map->dir); | ||
691 | } | ||
692 | |||
693 | dma->map_count = 0; | ||
694 | } | ||
695 | |||
696 | /** artpec6_crypto_setup_out_descr - Setup an out descriptor | ||
697 | * | ||
698 | * @dst: The virtual address of the data | ||
699 | * @len: The length of the data | ||
700 | * @eop: True if this is the last buffer in the packet | ||
701 | * @use_short: If this is true and the data length is less than 7 bytes then | ||
702 | * a short descriptor will be used | ||
703 | * | ||
704 | * @return 0 on success | ||
705 | * Any errors from artpec6_crypto_setup_out_descr_short() or | ||
706 | * setup_out_descr_phys() | ||
707 | */ | ||
708 | static int | ||
709 | artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common, | ||
710 | void *dst, unsigned int len, bool eop, | ||
711 | bool use_short) | ||
712 | { | ||
713 | if (use_short && len < 7) { | ||
714 | return artpec6_crypto_setup_out_descr_short(common, dst, len, | ||
715 | eop); | ||
716 | } else { | ||
717 | int ret; | ||
718 | dma_addr_t dma_addr; | ||
719 | |||
720 | ret = artpec6_crypto_dma_map_single(common, dst, len, | ||
721 | DMA_TO_DEVICE, | ||
722 | &dma_addr); | ||
723 | if (ret) | ||
724 | return ret; | ||
725 | |||
726 | return artpec6_crypto_setup_out_descr_phys(common, dma_addr, | ||
727 | len, eop); | ||
728 | } | ||
729 | } | ||
730 | |||
731 | /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a | ||
732 | * physical address | ||
733 | * | ||
734 | * @addr: The physical address of the data buffer | ||
735 | * @len: The length of the data buffer | ||
736 | * @intr: True if an interrupt should be fired after HW processing of this | ||
737 | * descriptor | ||
738 | * | ||
739 | */ | ||
740 | static int | ||
741 | artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common, | ||
742 | dma_addr_t addr, unsigned int len, bool intr) | ||
743 | { | ||
744 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
745 | struct pdma_descr *d; | ||
746 | |||
747 | if (dma->in_cnt >= PDMA_DESCR_COUNT || | ||
748 | fault_inject_dma_descr()) { | ||
749 | pr_err("No free IN DMA descriptors available!\n"); | ||
750 | return -ENOSPC; | ||
751 | } | ||
752 | d = &dma->in[dma->in_cnt++]; | ||
753 | memset(d, 0, sizeof(*d)); | ||
754 | |||
755 | d->ctrl.intr = intr; | ||
756 | d->data.len = len; | ||
757 | d->data.buf = addr; | ||
758 | return 0; | ||
759 | } | ||
760 | |||
761 | /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor | ||
762 | * | ||
763 | * @buffer: The virtual address of the data buffer | ||
764 | * @len: The length of the data buffer | ||
765 | * @last: If this is the last data buffer in the request (i.e. an interrupt | ||
766 | * is needed) | ||
767 | * | ||
768 | * Short descriptors are not used for the in channel | ||
769 | */ | ||
770 | static int | ||
771 | artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common, | ||
772 | void *buffer, unsigned int len, bool last) | ||
773 | { | ||
774 | dma_addr_t dma_addr; | ||
775 | int ret; | ||
776 | |||
777 | ret = artpec6_crypto_dma_map_single(common, buffer, len, | ||
778 | DMA_FROM_DEVICE, &dma_addr); | ||
779 | if (ret) | ||
780 | return ret; | ||
781 | |||
782 | return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last); | ||
783 | } | ||
784 | |||
785 | static struct artpec6_crypto_bounce_buffer * | ||
786 | artpec6_crypto_alloc_bounce(gfp_t flags) | ||
787 | { | ||
788 | void *base; | ||
789 | size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) + | ||
790 | 2 * ARTPEC_CACHE_LINE_MAX; | ||
791 | struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags); | ||
792 | |||
793 | if (!bbuf) | ||
794 | return NULL; | ||
795 | |||
796 | base = bbuf + 1; | ||
797 | bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX); | ||
798 | return bbuf; | ||
799 | } | ||
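Over-allocating by two cache lines guarantees that PTR_ALIGN can always carve out a line-aligned window of ARTPEC_CACHE_LINE_MAX bytes from the slack after the struct, regardless of where the allocation lands: at most 31 bytes are wasted on alignment, leaving at least 33 of the 64 spare bytes usable.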
800 | |||
801 | static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common, | ||
802 | struct artpec6_crypto_walk *walk, size_t size) | ||
803 | { | ||
804 | struct artpec6_crypto_bounce_buffer *bbuf; | ||
805 | int ret; | ||
806 | |||
807 | bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags); | ||
808 | if (!bbuf) | ||
809 | return -ENOMEM; | ||
810 | |||
811 | bbuf->length = size; | ||
812 | bbuf->sg = walk->sg; | ||
813 | bbuf->offset = walk->offset; | ||
814 | |||
815 | ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false); | ||
816 | if (ret) { | ||
817 | kfree(bbuf); | ||
818 | return ret; | ||
819 | } | ||
820 | |||
821 | pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset); | ||
822 | list_add_tail(&bbuf->list, &common->dma->bounce_buffers); | ||
823 | return 0; | ||
824 | } | ||
825 | |||
826 | static int | ||
827 | artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common, | ||
828 | struct artpec6_crypto_walk *walk, | ||
829 | size_t count) | ||
830 | { | ||
831 | size_t chunk; | ||
832 | int ret; | ||
833 | dma_addr_t addr; | ||
834 | |||
835 | while (walk->sg && count) { | ||
836 | chunk = min(count, artpec6_crypto_walk_chunklen(walk)); | ||
837 | addr = artpec6_crypto_walk_chunk_phys(walk); | ||
838 | |||
839 | /* When destination buffers are not aligned to the cache line | ||
840 | * size we need bounce buffers. The DMA-API requires that the | ||
841 | * entire cache line is owned by the DMA buffer, and this also | ||
842 | * holds when coherent DMA is used. | ||
843 | */ | ||
844 | if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) { | ||
845 | chunk = min_t(dma_addr_t, chunk, | ||
846 | ALIGN(addr, ARTPEC_CACHE_LINE_MAX) - | ||
847 | addr); | ||
848 | |||
849 | pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk); | ||
850 | ret = setup_bounce_buffer_in(common, walk, chunk); | ||
851 | } else if (chunk < ARTPEC_CACHE_LINE_MAX) { | ||
852 | pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk); | ||
853 | ret = setup_bounce_buffer_in(common, walk, chunk); | ||
854 | } else { | ||
855 | dma_addr_t dma_addr; | ||
856 | |||
857 | chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1); | ||
858 | |||
859 | pr_debug("CHUNK %pad:%zu\n", &addr, chunk); | ||
860 | |||
861 | ret = artpec6_crypto_dma_map_page(common, | ||
862 | sg_page(walk->sg), | ||
863 | walk->sg->offset + | ||
864 | walk->offset, | ||
865 | chunk, | ||
866 | DMA_FROM_DEVICE, | ||
867 | &dma_addr); | ||
868 | if (ret) | ||
869 | return ret; | ||
870 | |||
871 | ret = artpec6_crypto_setup_in_descr_phys(common, | ||
872 | dma_addr, | ||
873 | chunk, false); | ||
874 | } | ||
875 | |||
876 | if (ret) | ||
877 | return ret; | ||
878 | |||
879 | count = count - chunk; | ||
880 | artpec6_crypto_walk_advance(walk, chunk); | ||
881 | } | ||
882 | |||
883 | if (count) | ||
884 | pr_err("EOL unexpected %zu bytes left\n", count); | ||
885 | |||
886 | return count ? -EINVAL : 0; | ||
887 | } | ||
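As a worked example with ARTPEC_CACHE_LINE_MAX = 32: a destination chunk starting at physical address 0x1007 is first bounced for 25 bytes (up to the 0x1020 line boundary); on the next iteration the address is aligned, so the remainder is trimmed down to a whole number of cache lines and DMA-mapped directly, and any tail shorter than one line is bounced again.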
888 | |||
889 | static int | ||
890 | artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common, | ||
891 | struct artpec6_crypto_walk *walk, | ||
892 | size_t count) | ||
893 | { | ||
894 | size_t chunk; | ||
895 | int ret; | ||
896 | dma_addr_t addr; | ||
897 | |||
898 | while (walk->sg && count) { | ||
899 | chunk = min(count, artpec6_crypto_walk_chunklen(walk)); | ||
900 | addr = artpec6_crypto_walk_chunk_phys(walk); | ||
901 | |||
902 | pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk); | ||
903 | |||
904 | if (addr & 3) { | ||
905 | char buf[3]; | ||
906 | |||
907 | chunk = min_t(size_t, chunk, (4-(addr&3))); | ||
908 | |||
909 | sg_pcopy_to_buffer(walk->sg, 1, buf, chunk, | ||
910 | walk->offset); | ||
911 | |||
912 | ret = artpec6_crypto_setup_out_descr_short(common, buf, | ||
913 | chunk, | ||
914 | false); | ||
915 | } else { | ||
916 | dma_addr_t dma_addr; | ||
917 | |||
918 | ret = artpec6_crypto_dma_map_page(common, | ||
919 | sg_page(walk->sg), | ||
920 | walk->sg->offset + | ||
921 | walk->offset, | ||
922 | chunk, | ||
923 | DMA_TO_DEVICE, | ||
924 | &dma_addr); | ||
925 | if (ret) | ||
926 | return ret; | ||
927 | |||
928 | ret = artpec6_crypto_setup_out_descr_phys(common, | ||
929 | dma_addr, | ||
930 | chunk, false); | ||
931 | } | ||
932 | |||
933 | if (ret) | ||
934 | return ret; | ||
935 | |||
936 | count = count - chunk; | ||
937 | artpec6_crypto_walk_advance(walk, chunk); | ||
938 | } | ||
939 | |||
940 | if (count) | ||
941 | pr_err("EOL unexpected %zu bytes left\n", count); | ||
942 | |||
943 | return count ? -EINVAL : 0; | ||
944 | } | ||
945 | |||
946 | |||
947 | /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor | ||
948 | * | ||
949 | * If the out descriptor list is non-empty, then the eop flag on the | ||
950 | * last used out descriptor will be set. | ||
951 | * | ||
952 | * @return 0 on success | ||
953 | * -EINVAL if the out descriptor is empty or has overflown | ||
954 | */ | ||
955 | static int | ||
956 | artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common) | ||
957 | { | ||
958 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
959 | struct pdma_descr *d; | ||
960 | |||
961 | if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) { | ||
962 | pr_err("%s: OUT descriptor list is %s\n", | ||
963 | MODULE_NAME, dma->out_cnt ? "full" : "empty"); | ||
964 | return -EINVAL; | ||
965 | |||
966 | } | ||
967 | |||
968 | d = &dma->out[dma->out_cnt-1]; | ||
969 | d->ctrl.eop = 1; | ||
970 | |||
971 | return 0; | ||
972 | } | ||
973 | |||
974 | /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last | ||
975 | * in descriptor | ||
976 | * | ||
977 | * See artpec6_crypto_terminate_out_descrs() for return values | ||
978 | */ | ||
979 | static int | ||
980 | artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common) | ||
981 | { | ||
982 | struct artpec6_crypto_dma_descriptors *dma = common->dma; | ||
983 | struct pdma_descr *d; | ||
984 | |||
985 | if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) { | ||
986 | pr_err("%s: IN descriptor list is %s\n", | ||
987 | MODULE_NAME, dma->in_cnt ? "full" : "empty"); | ||
988 | return -EINVAL; | ||
989 | } | ||
990 | |||
991 | d = &dma->in[dma->in_cnt-1]; | ||
992 | d->ctrl.intr = 1; | ||
993 | return 0; | ||
994 | } | ||
995 | |||
996 | /** create_hash_pad - Create a Secure Hash conformant pad | ||
997 | * | ||
998 | * @dst: Buffer for the pad; must hold SHA512_BLOCK_SIZE + 32 bytes | ||
999 | * @dgstlen: Number of message bytes hashed, used to size the pad | ||
1000 | * @bitcount: Bit count to store in the length field of the pad | ||
1001 | * | ||
1002 | * @return The total number of padding bytes written to @dst | ||
1003 | */ | ||
1004 | static size_t | ||
1005 | create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount) | ||
1006 | { | ||
1007 | unsigned int mod, target, diff, pad_bytes, size_bytes; | ||
1008 | __be64 bits = __cpu_to_be64(bitcount); | ||
1009 | |||
1010 | switch (oper) { | ||
1011 | case regk_crypto_sha1: | ||
1012 | case regk_crypto_sha256: | ||
1013 | case regk_crypto_hmac_sha1: | ||
1014 | case regk_crypto_hmac_sha256: | ||
1015 | target = 448 / 8; | ||
1016 | mod = 512 / 8; | ||
1017 | size_bytes = 8; | ||
1018 | break; | ||
1019 | default: | ||
1020 | target = 896 / 8; | ||
1021 | mod = 1024 / 8; | ||
1022 | size_bytes = 16; | ||
1023 | break; | ||
1024 | } | ||
1025 | |||
1026 | target -= 1; | ||
1027 | diff = dgstlen & (mod - 1); | ||
1028 | pad_bytes = diff > target ? target + mod - diff : target - diff; | ||
1029 | |||
1030 | memset(dst + 1, 0, pad_bytes); | ||
1031 | dst[0] = 0x80; | ||
1032 | |||
1033 | if (size_bytes == 16) { | ||
1034 | memset(dst + 1 + pad_bytes, 0, 8); | ||
1035 | memcpy(dst + 1 + pad_bytes + 8, &bits, 8); | ||
1036 | } else { | ||
1037 | memcpy(dst + 1 + pad_bytes, &bits, 8); | ||
1038 | } | ||
1039 | |||
1040 | return pad_bytes + size_bytes + 1; | ||
1041 | } | ||
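As a worked example: hashing 3 bytes with SHA-256 gives mod = 64, target = 55 (after the decrement) and diff = 3, so pad_bytes = 52; the function writes the 0x80 marker, 52 zero bytes and an 8-byte big-endian bit count, returning 61 so that 3 + 61 reaches the 64-byte block boundary.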
1042 | |||
1043 | static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common, | ||
1044 | struct crypto_async_request *parent, | ||
1045 | void (*complete)(struct crypto_async_request *req), | ||
1046 | struct scatterlist *dstsg, unsigned int nbytes) | ||
1047 | { | ||
1048 | gfp_t flags; | ||
1049 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
1050 | |||
1051 | flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
1052 | GFP_KERNEL : GFP_ATOMIC; | ||
1053 | |||
1054 | common->gfp_flags = flags; | ||
1055 | common->dma = kmem_cache_alloc(ac->dma_cache, flags); | ||
1056 | if (!common->dma) | ||
1057 | return -ENOMEM; | ||
1058 | |||
1059 | common->req = parent; | ||
1060 | common->complete = complete; | ||
1061 | return 0; | ||
1062 | } | ||
1063 | |||
1064 | static void | ||
1065 | artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma) | ||
1066 | { | ||
1067 | struct artpec6_crypto_bounce_buffer *b; | ||
1068 | struct artpec6_crypto_bounce_buffer *next; | ||
1069 | |||
1070 | list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) { | ||
1071 | kfree(b); | ||
1072 | } | ||
1073 | } | ||
1074 | |||
1075 | static int | ||
1076 | artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common) | ||
1077 | { | ||
1078 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
1079 | |||
1080 | artpec6_crypto_dma_unmap_all(common); | ||
1081 | artpec6_crypto_bounce_destroy(common->dma); | ||
1082 | kmem_cache_free(ac->dma_cache, common->dma); | ||
1083 | common->dma = NULL; | ||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | /* | ||
1088 | * Ciphering functions. | ||
1089 | */ | ||
1090 | static int artpec6_crypto_encrypt(struct skcipher_request *req) | ||
1091 | { | ||
1092 | struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); | ||
1093 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); | ||
1094 | struct artpec6_crypto_request_context *req_ctx = NULL; | ||
1095 | void (*complete)(struct crypto_async_request *req); | ||
1096 | int ret; | ||
1097 | |||
1098 | req_ctx = skcipher_request_ctx(req); | ||
1099 | |||
1100 | switch (ctx->crypto_type) { | ||
1101 | case ARTPEC6_CRYPTO_CIPHER_AES_CBC: | ||
1102 | case ARTPEC6_CRYPTO_CIPHER_AES_ECB: | ||
1103 | case ARTPEC6_CRYPTO_CIPHER_AES_XTS: | ||
1104 | req_ctx->decrypt = 0; | ||
1105 | break; | ||
1106 | default: | ||
1107 | break; | ||
1108 | } | ||
1109 | |||
1110 | switch (ctx->crypto_type) { | ||
1111 | case ARTPEC6_CRYPTO_CIPHER_AES_CBC: | ||
1112 | complete = artpec6_crypto_complete_cbc_encrypt; | ||
1113 | break; | ||
1114 | default: | ||
1115 | complete = artpec6_crypto_complete_crypto; | ||
1116 | break; | ||
1117 | } | ||
1118 | |||
1119 | ret = artpec6_crypto_common_init(&req_ctx->common, | ||
1120 | &req->base, | ||
1121 | complete, | ||
1122 | req->dst, req->cryptlen); | ||
1123 | if (ret) | ||
1124 | return ret; | ||
1125 | |||
1126 | ret = artpec6_crypto_prepare_crypto(req); | ||
1127 | if (ret) { | ||
1128 | artpec6_crypto_common_destroy(&req_ctx->common); | ||
1129 | return ret; | ||
1130 | } | ||
1131 | |||
1132 | return artpec6_crypto_submit(&req_ctx->common); | ||
1133 | } | ||
1134 | |||
1135 | static int artpec6_crypto_decrypt(struct skcipher_request *req) | ||
1136 | { | ||
1137 | int ret; | ||
1138 | struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); | ||
1139 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); | ||
1140 | struct artpec6_crypto_request_context *req_ctx = NULL; | ||
1141 | void (*complete)(struct crypto_async_request *req); | ||
1142 | |||
1143 | req_ctx = skcipher_request_ctx(req); | ||
1144 | |||
1145 | switch (ctx->crypto_type) { | ||
1146 | case ARTPEC6_CRYPTO_CIPHER_AES_CBC: | ||
1147 | case ARTPEC6_CRYPTO_CIPHER_AES_ECB: | ||
1148 | case ARTPEC6_CRYPTO_CIPHER_AES_XTS: | ||
1149 | req_ctx->decrypt = 1; | ||
1150 | break; | ||
1151 | default: | ||
1152 | break; | ||
1153 | } | ||
1154 | |||
1155 | |||
1156 | switch (ctx->crypto_type) { | ||
1157 | case ARTPEC6_CRYPTO_CIPHER_AES_CBC: | ||
1158 | complete = artpec6_crypto_complete_cbc_decrypt; | ||
1159 | break; | ||
1160 | default: | ||
1161 | complete = artpec6_crypto_complete_crypto; | ||
1162 | break; | ||
1163 | } | ||
1164 | |||
1165 | ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, | ||
1166 | complete, | ||
1167 | req->dst, req->cryptlen); | ||
1168 | if (ret) | ||
1169 | return ret; | ||
1170 | |||
1171 | ret = artpec6_crypto_prepare_crypto(req); | ||
1172 | if (ret) { | ||
1173 | artpec6_crypto_common_destroy(&req_ctx->common); | ||
1174 | return ret; | ||
1175 | } | ||
1176 | |||
1177 | return artpec6_crypto_submit(&req_ctx->common); | ||
1178 | } | ||
1179 | |||
1180 | static int | ||
1181 | artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt) | ||
1182 | { | ||
1183 | struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); | ||
1184 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); | ||
1185 | size_t iv_len = crypto_skcipher_ivsize(cipher); | ||
1186 | unsigned int counter = be32_to_cpup((__be32 *) | ||
1187 | (req->iv + iv_len - 4)); | ||
1188 | unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / | ||
1189 | AES_BLOCK_SIZE; | ||
1190 | |||
1191 | /* | ||
1192 | * The hardware uses only the last 32 bits as the counter while the | ||
1193 | * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that | ||
1194 | * the whole IV is a counter. So fall back if the counter is going to | ||
1195 | * overflow. | ||
1196 | */ | ||
1197 | if (counter + nblks < counter) { | ||
1198 | int ret; | ||
1199 | |||
1200 | pr_debug("counter %x will overflow (nblks %u), falling back\n", | ||
1201 | counter, counter + nblks); | ||
1202 | |||
1203 | ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key, | ||
1204 | ctx->key_length); | ||
1205 | if (ret) | ||
1206 | return ret; | ||
1207 | |||
1208 | { | ||
1209 | SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); | ||
1210 | |||
1211 | skcipher_request_set_tfm(subreq, ctx->fallback); | ||
1212 | skcipher_request_set_callback(subreq, req->base.flags, | ||
1213 | NULL, NULL); | ||
1214 | skcipher_request_set_crypt(subreq, req->src, req->dst, | ||
1215 | req->cryptlen, req->iv); | ||
1216 | ret = encrypt ? crypto_skcipher_encrypt(subreq) | ||
1217 | : crypto_skcipher_decrypt(subreq); | ||
1218 | skcipher_request_zero(subreq); | ||
1219 | } | ||
1220 | return ret; | ||
1221 | } | ||
1222 | |||
1223 | return encrypt ? artpec6_crypto_encrypt(req) | ||
1224 | : artpec6_crypto_decrypt(req); | ||
1225 | } | ||
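For example, a 64-byte request (nblks = 4) whose IV ends in 0xfffffffe makes counter + nblks wrap around to 2, which is smaller than counter, so the request is handed to the software fallback.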
1226 | |||
1227 | static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req) | ||
1228 | { | ||
1229 | return artpec6_crypto_ctr_crypt(req, true); | ||
1230 | } | ||
1231 | |||
1232 | static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req) | ||
1233 | { | ||
1234 | return artpec6_crypto_ctr_crypt(req, false); | ||
1235 | } | ||
1236 | |||
1237 | /* | ||
1238 | * AEAD functions | ||
1239 | */ | ||
1240 | static int artpec6_crypto_aead_init(struct crypto_aead *tfm) | ||
1241 | { | ||
1242 | struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm); | ||
1243 | |||
1244 | memset(tfm_ctx, 0, sizeof(*tfm_ctx)); | ||
1245 | |||
1246 | crypto_aead_set_reqsize(tfm, | ||
1247 | sizeof(struct artpec6_crypto_aead_req_ctx)); | ||
1248 | |||
1249 | return 0; | ||
1250 | } | ||
1251 | |||
1252 | static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key, | ||
1253 | unsigned int len) | ||
1254 | { | ||
1255 | struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base); | ||
1256 | |||
1257 | if (len != 16 && len != 24 && len != 32) { | ||
1258 | crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1259 | return -EINVAL; | ||
1260 | } | ||
1261 | |||
1262 | ctx->key_length = len; | ||
1263 | |||
1264 | memcpy(ctx->aes_key, key, len); | ||
1265 | return 0; | ||
1266 | } | ||
1267 | |||
1268 | static int artpec6_crypto_aead_encrypt(struct aead_request *req) | ||
1269 | { | ||
1270 | int ret; | ||
1271 | struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req); | ||
1272 | |||
1273 | req_ctx->decrypt = false; | ||
1274 | ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, | ||
1275 | artpec6_crypto_complete_aead, | ||
1276 | NULL, 0); | ||
1277 | if (ret) | ||
1278 | return ret; | ||
1279 | |||
1280 | ret = artpec6_crypto_prepare_aead(req); | ||
1281 | if (ret) { | ||
1282 | artpec6_crypto_common_destroy(&req_ctx->common); | ||
1283 | return ret; | ||
1284 | } | ||
1285 | |||
1286 | return artpec6_crypto_submit(&req_ctx->common); | ||
1287 | } | ||
1288 | |||
1289 | static int artpec6_crypto_aead_decrypt(struct aead_request *req) | ||
1290 | { | ||
1291 | int ret; | ||
1292 | struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req); | ||
1293 | |||
1294 | req_ctx->decrypt = true; | ||
1295 | if (req->cryptlen < AES_BLOCK_SIZE) | ||
1296 | return -EINVAL; | ||
1297 | |||
1298 | ret = artpec6_crypto_common_init(&req_ctx->common, | ||
1299 | &req->base, | ||
1300 | artpec6_crypto_complete_aead, | ||
1301 | NULL, 0); | ||
1302 | if (ret) | ||
1303 | return ret; | ||
1304 | |||
1305 | ret = artpec6_crypto_prepare_aead(req); | ||
1306 | if (ret) { | ||
1307 | artpec6_crypto_common_destroy(&req_ctx->common); | ||
1308 | return ret; | ||
1309 | } | ||
1310 | |||
1311 | return artpec6_crypto_submit(&req_ctx->common); | ||
1312 | } | ||
1313 | |||
1314 | static int artpec6_crypto_prepare_hash(struct ahash_request *areq) | ||
1315 | { | ||
1316 | struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm); | ||
1317 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq); | ||
1318 | size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); | ||
1319 | size_t contextsize = digestsize == SHA384_DIGEST_SIZE ? | ||
1320 | SHA512_DIGEST_SIZE : digestsize; | ||
1321 | size_t blocksize = crypto_tfm_alg_blocksize( | ||
1322 | crypto_ahash_tfm(crypto_ahash_reqtfm(areq))); | ||
1323 | struct artpec6_crypto_req_common *common = &req_ctx->common; | ||
1324 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
1325 | enum artpec6_crypto_variant variant = ac->variant; | ||
1326 | u32 sel_ctx; | ||
1327 | bool ext_ctx = false; | ||
1328 | bool run_hw = false; | ||
1329 | int error = 0; | ||
1330 | |||
1331 | artpec6_crypto_init_dma_operation(common); | ||
1332 | |||
1333 | /* Upload the HMAC key; it must be the first packet */ | ||
1334 | if (req_ctx->hash_flags & HASH_FLAG_HMAC) { | ||
1335 | if (variant == ARTPEC6_CRYPTO) { | ||
1336 | req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, | ||
1337 | a6_regk_crypto_dlkey); | ||
1338 | } else { | ||
1339 | req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, | ||
1340 | a7_regk_crypto_dlkey); | ||
1341 | } | ||
1342 | |||
1343 | /* Copy and pad up the key */ | ||
1344 | memcpy(req_ctx->key_buffer, ctx->hmac_key, | ||
1345 | ctx->hmac_key_length); | ||
1346 | memset(req_ctx->key_buffer + ctx->hmac_key_length, 0, | ||
1347 | blocksize - ctx->hmac_key_length); | ||
1348 | |||
1349 | error = artpec6_crypto_setup_out_descr(common, | ||
1350 | (void *)&req_ctx->key_md, | ||
1351 | sizeof(req_ctx->key_md), false, false); | ||
1352 | if (error) | ||
1353 | return error; | ||
1354 | |||
1355 | error = artpec6_crypto_setup_out_descr(common, | ||
1356 | req_ctx->key_buffer, blocksize, | ||
1357 | true, false); | ||
1358 | if (error) | ||
1359 | return error; | ||
1360 | } | ||
1361 | |||
1362 | if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) { | ||
1363 | /* Restore context */ | ||
1364 | sel_ctx = regk_crypto_ext; | ||
1365 | ext_ctx = true; | ||
1366 | } else { | ||
1367 | sel_ctx = regk_crypto_init; | ||
1368 | } | ||
1369 | |||
1370 | if (variant == ARTPEC6_CRYPTO) { | ||
1371 | req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX; | ||
1372 | req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx); | ||
1373 | |||
1374 | /* If this is the final round, set the final flag */ | ||
1375 | if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) | ||
1376 | req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN; | ||
1377 | } else { | ||
1378 | req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX; | ||
1379 | req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx); | ||
1380 | |||
1381 | /* If this is the final round, set the final flag */ | ||
1382 | if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) | ||
1383 | req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN; | ||
1384 | } | ||
1385 | |||
1386 | /* Set up the metadata descriptors */ | ||
1387 | error = artpec6_crypto_setup_out_descr(common, | ||
1388 | (void *)&req_ctx->hash_md, | ||
1389 | sizeof(req_ctx->hash_md), false, false); | ||
1390 | if (error) | ||
1391 | return error; | ||
1392 | |||
1393 | error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); | ||
1394 | if (error) | ||
1395 | return error; | ||
1396 | |||
1397 | if (ext_ctx) { | ||
1398 | error = artpec6_crypto_setup_out_descr(common, | ||
1399 | req_ctx->digeststate, | ||
1400 | contextsize, false, false); | ||
1401 | |||
1402 | if (error) | ||
1403 | return error; | ||
1404 | } | ||
1405 | |||
1406 | if (req_ctx->hash_flags & HASH_FLAG_UPDATE) { | ||
1407 | size_t done_bytes = 0; | ||
1408 | size_t total_bytes = areq->nbytes + req_ctx->partial_bytes; | ||
1409 | size_t ready_bytes = round_down(total_bytes, blocksize); | ||
1410 | struct artpec6_crypto_walk walk; | ||
1411 | |||
1412 | run_hw = ready_bytes > 0; | ||
1413 | if (req_ctx->partial_bytes && ready_bytes) { | ||
1414 | /* We have a partial buffer and will send at least some | ||
1415 | * bytes to the HW. Empty this partial buffer before | ||
1416 | * tackling the SG lists | ||
1417 | */ | ||
1418 | memcpy(req_ctx->partial_buffer_out, | ||
1419 | req_ctx->partial_buffer, | ||
1420 | req_ctx->partial_bytes); | ||
1421 | |||
1422 | error = artpec6_crypto_setup_out_descr(common, | ||
1423 | req_ctx->partial_buffer_out, | ||
1424 | req_ctx->partial_bytes, | ||
1425 | false, true); | ||
1426 | if (error) | ||
1427 | return error; | ||
1428 | |||
1429 | /* Reset partial buffer */ | ||
1430 | done_bytes += req_ctx->partial_bytes; | ||
1431 | req_ctx->partial_bytes = 0; | ||
1432 | } | ||
1433 | |||
1434 | artpec6_crypto_walk_init(&walk, areq->src); | ||
1435 | |||
1436 | error = artpec6_crypto_setup_sg_descrs_out(common, &walk, | ||
1437 | ready_bytes - | ||
1438 | done_bytes); | ||
1439 | if (error) | ||
1440 | return error; | ||
1441 | |||
1442 | if (walk.sg) { | ||
1443 | size_t sg_skip = ready_bytes - done_bytes; | ||
1444 | size_t sg_rem = areq->nbytes - sg_skip; | ||
1445 | |||
1446 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | ||
1447 | req_ctx->partial_buffer + | ||
1448 | req_ctx->partial_bytes, | ||
1449 | sg_rem, sg_skip); | ||
1450 | |||
1451 | req_ctx->partial_bytes += sg_rem; | ||
1452 | } | ||
1453 | |||
1454 | req_ctx->digcnt += ready_bytes; | ||
1455 | req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE); | ||
1456 | } | ||
1457 | |||
1458 | /* Finalize */ | ||
1459 | if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) { | ||
1460 | bool needtrim = contextsize != digestsize; | ||
1461 | size_t hash_pad_len; | ||
1462 | u64 digest_bits; | ||
1463 | u32 oper; | ||
1464 | |||
1465 | if (variant == ARTPEC6_CRYPTO) | ||
1466 | oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md); | ||
1467 | else | ||
1468 | oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md); | ||
1469 | |||
1470 | /* Write out the partial buffer if present */ | ||
1471 | if (req_ctx->partial_bytes) { | ||
1472 | memcpy(req_ctx->partial_buffer_out, | ||
1473 | req_ctx->partial_buffer, | ||
1474 | req_ctx->partial_bytes); | ||
1475 | error = artpec6_crypto_setup_out_descr(common, | ||
1476 | req_ctx->partial_buffer_out, | ||
1477 | req_ctx->partial_bytes, | ||
1478 | false, true); | ||
1479 | if (error) | ||
1480 | return error; | ||
1481 | |||
1482 | req_ctx->digcnt += req_ctx->partial_bytes; | ||
1483 | req_ctx->partial_bytes = 0; | ||
1484 | } | ||
1485 | |||
1486 | if (req_ctx->hash_flags & HASH_FLAG_HMAC) | ||
1487 | digest_bits = 8 * (req_ctx->digcnt + blocksize); | ||
1488 | else | ||
1489 | digest_bits = 8 * req_ctx->digcnt; | ||
1490 | |||
1491 | /* Add the hash pad */ | ||
1492 | hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer, | ||
1493 | req_ctx->digcnt, digest_bits); | ||
1494 | error = artpec6_crypto_setup_out_descr(common, | ||
1495 | req_ctx->pad_buffer, | ||
1496 | hash_pad_len, false, | ||
1497 | true); | ||
1498 | req_ctx->digcnt = 0; | ||
1499 | |||
1500 | if (error) | ||
1501 | return error; | ||
1502 | |||
1503 | /* Descriptor for the final result */ | ||
1504 | error = artpec6_crypto_setup_in_descr(common, areq->result, | ||
1505 | digestsize, | ||
1506 | !needtrim); | ||
1507 | if (error) | ||
1508 | return error; | ||
1509 | |||
1510 | if (needtrim) { | ||
1511 | /* Discard the extra context bytes for SHA-384 */ | ||
1512 | error = artpec6_crypto_setup_in_descr(common, | ||
1513 | req_ctx->partial_buffer, | ||
1514 | digestsize - contextsize, true); | ||
1515 | if (error) | ||
1516 | return error; | ||
1517 | } | ||
1518 | |||
1519 | } else { /* This is not the final operation for this request */ | ||
1520 | if (!run_hw) | ||
1521 | return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START; | ||
1522 | |||
1523 | /* Save the result to the context */ | ||
1524 | error = artpec6_crypto_setup_in_descr(common, | ||
1525 | req_ctx->digeststate, | ||
1526 | contextsize, false); | ||
1527 | if (error) | ||
1528 | return error; | ||
1529 | /* fall through */ | ||
1530 | } | ||
1531 | |||
1532 | req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE | | ||
1533 | HASH_FLAG_FINALIZE); | ||
1534 | |||
1535 | error = artpec6_crypto_terminate_in_descrs(common); | ||
1536 | if (error) | ||
1537 | return error; | ||
1538 | |||
1539 | error = artpec6_crypto_terminate_out_descrs(common); | ||
1540 | if (error) | ||
1541 | return error; | ||
1542 | |||
1543 | error = artpec6_crypto_dma_map_descs(common); | ||
1544 | if (error) | ||
1545 | return error; | ||
1546 | |||
1547 | return ARTPEC6_CRYPTO_PREPARE_HASH_START; | ||
1548 | } | ||
1549 | |||
1550 | |||
1551 | static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm) | ||
1552 | { | ||
1553 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); | ||
1554 | |||
1555 | tfm->reqsize = sizeof(struct artpec6_crypto_request_context); | ||
1556 | ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB; | ||
1557 | |||
1558 | return 0; | ||
1559 | } | ||
1560 | |||
1561 | static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm) | ||
1562 | { | ||
1563 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); | ||
1564 | |||
1565 | ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), | ||
1566 | 0, | ||
1567 | CRYPTO_ALG_ASYNC | | ||
1568 | CRYPTO_ALG_NEED_FALLBACK); | ||
1569 | if (IS_ERR(ctx->fallback)) | ||
1570 | return PTR_ERR(ctx->fallback); | ||
1571 | |||
1572 | tfm->reqsize = sizeof(struct artpec6_crypto_request_context); | ||
1573 | ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR; | ||
1574 | |||
1575 | return 0; | ||
1576 | } | ||
1577 | |||
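
The CTR transform is the only one that allocates a software fallback (note the CRYPTO_ALG_NEED_FALLBACK flag on the "ctr(aes)" algorithm registered further down), so requests the accelerator cannot process can be redone in software. The sketch below is the conventional skcipher fallback pattern, not code from this patch; the assumption that the driver's CTR path invokes ctx->fallback this way (e.g. on counter wraparound) is hypothetical as far as this excerpt goes:

    /* Sketch only: conventional skcipher fallback invocation. */
    static int example_ctr_fallback(struct artpec6_cryptotfm_context *ctx,
                                    struct skcipher_request *req, bool encrypt)
    {
            int ret;

            SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

            skcipher_request_set_tfm(subreq, ctx->fallback);
            skcipher_request_set_callback(subreq, skcipher_request_flags(req),
                                          NULL, NULL);
            skcipher_request_set_crypt(subreq, req->src, req->dst,
                                       req->cryptlen, req->iv);

            ret = encrypt ? crypto_skcipher_encrypt(subreq)
                          : crypto_skcipher_decrypt(subreq);

            skcipher_request_zero(subreq);  /* wipe the on-stack request */
            return ret;
    }
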
1578 | static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm) | ||
1579 | { | ||
1580 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); | ||
1581 | |||
1582 | tfm->reqsize = sizeof(struct artpec6_crypto_request_context); | ||
1583 | ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC; | ||
1584 | |||
1585 | return 0; | ||
1586 | } | ||
1587 | |||
1588 | static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm) | ||
1589 | { | ||
1590 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); | ||
1591 | |||
1592 | tfm->reqsize = sizeof(struct artpec6_crypto_request_context); | ||
1593 | ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS; | ||
1594 | |||
1595 | return 0; | ||
1596 | } | ||
1597 | |||
1598 | static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm) | ||
1599 | { | ||
1600 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); | ||
1601 | |||
1602 | memset(ctx, 0, sizeof(*ctx)); | ||
1603 | } | ||
1604 | |||
1605 | static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm) | ||
1606 | { | ||
1607 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); | ||
1608 | |||
1609 | crypto_free_skcipher(ctx->fallback); | ||
1610 | artpec6_crypto_aes_exit(tfm); | ||
1611 | } | ||
1612 | |||
1613 | static int | ||
1614 | artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key, | ||
1615 | unsigned int keylen) | ||
1616 | { | ||
1617 | struct artpec6_cryptotfm_context *ctx = | ||
1618 | crypto_skcipher_ctx(cipher); | ||
1619 | |||
1620 | switch (keylen) { | ||
1621 | case 16: | ||
1622 | case 24: | ||
1623 | case 32: | ||
1624 | break; | ||
1625 | default: | ||
1626 | crypto_skcipher_set_flags(cipher, | ||
1627 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1628 | return -EINVAL; | ||
1629 | } | ||
1630 | |||
1631 | memcpy(ctx->aes_key, key, keylen); | ||
1632 | ctx->key_length = keylen; | ||
1633 | return 0; | ||
1634 | } | ||
1635 | |||
1636 | static int | ||
1637 | artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key, | ||
1638 | unsigned int keylen) | ||
1639 | { | ||
1640 | struct artpec6_cryptotfm_context *ctx = | ||
1641 | crypto_skcipher_ctx(cipher); | ||
1642 | int ret; | ||
1643 | |||
1644 | ret = xts_check_key(&cipher->base, key, keylen); | ||
1645 | if (ret) | ||
1646 | return ret; | ||
1647 | |||
1648 | switch (keylen) { | ||
1649 | case 32: | ||
1650 | case 48: | ||
1651 | case 64: | ||
1652 | break; | ||
1653 | default: | ||
1654 | crypto_skcipher_set_flags(cipher, | ||
1655 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1656 | return -EINVAL; | ||
1657 | } | ||
1658 | |||
1659 | memcpy(ctx->aes_key, key, keylen); | ||
1660 | ctx->key_length = keylen; | ||
1661 | return 0; | ||
1662 | } | ||
1663 | |||
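
xts_check_key() enforces that an XTS key splits into two equal halves (the length must be even) and, when the kernel runs in FIPS mode, that the cipher and tweak halves are not identical. A small hypothetical illustration of the first rule:

    /* Illustration: an odd-length XTS key is rejected up front. */
    static int example_xts_odd_key(struct crypto_skcipher *tfm)
    {
            static const u8 odd_key[33];    /* cannot split into two halves */

            return crypto_skcipher_setkey(tfm, odd_key, sizeof(odd_key));
            /* returns -EINVAL via xts_check_key() */
    }
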
1664 | /** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request | ||
1665 | * | ||
1666 | * @areq: The async request to process | ||
1667 | * | ||
1668 | * @return 0 if the dma job was successfully prepared | ||
1669 | * <0 on error | ||
1670 | * | ||
1671 | * This function sets up the PDMA descriptors for a block cipher request. | ||
1672 | * | ||
1673 | * The required padding for AES-CTR and AES-XTS is added using a | ||
1674 | * preallocated buffer. | ||
1675 | * | ||
1676 | * The PDMA descriptor list will be as follows: | ||
1677 | * | ||
1678 | * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop> | ||
1679 | * IN: <CIPHER_MD><data_0>...[data_n]<intr> | ||
1680 | * | ||
1681 | */ | ||
1682 | static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq) | ||
1683 | { | ||
1684 | int ret; | ||
1685 | struct artpec6_crypto_walk walk; | ||
1686 | struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); | ||
1687 | struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); | ||
1688 | struct artpec6_crypto_request_context *req_ctx = NULL; | ||
1689 | size_t iv_len = crypto_skcipher_ivsize(cipher); | ||
1690 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
1691 | enum artpec6_crypto_variant variant = ac->variant; | ||
1692 | struct artpec6_crypto_req_common *common; | ||
1693 | bool cipher_decr = false; | ||
1694 | size_t cipher_klen; | ||
1695 | u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */ | ||
1696 | u32 oper; | ||
1697 | |||
1698 | req_ctx = skcipher_request_ctx(areq); | ||
1699 | common = &req_ctx->common; | ||
1700 | |||
1701 | artpec6_crypto_init_dma_operation(common); | ||
1702 | |||
1703 | if (variant == ARTPEC6_CRYPTO) | ||
1704 | ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey); | ||
1705 | else | ||
1706 | ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey); | ||
1707 | |||
1708 | ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md, | ||
1709 | sizeof(ctx->key_md), false, false); | ||
1710 | if (ret) | ||
1711 | return ret; | ||
1712 | |||
1713 | ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key, | ||
1714 | ctx->key_length, true, false); | ||
1715 | if (ret) | ||
1716 | return ret; | ||
1717 | |||
1718 | req_ctx->cipher_md = 0; | ||
1719 | |||
1720 | if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) | ||
1721 | cipher_klen = ctx->key_length/2; | ||
1722 | else | ||
1723 | cipher_klen = ctx->key_length; | ||
1724 | |||
1725 | /* Metadata */ | ||
1726 | switch (cipher_klen) { | ||
1727 | case 16: | ||
1728 | cipher_len = regk_crypto_key_128; | ||
1729 | break; | ||
1730 | case 24: | ||
1731 | cipher_len = regk_crypto_key_192; | ||
1732 | break; | ||
1733 | case 32: | ||
1734 | cipher_len = regk_crypto_key_256; | ||
1735 | break; | ||
1736 | default: | ||
1737 | pr_err("%s: Invalid key length %d!\n", | ||
1738 | MODULE_NAME, ctx->key_length); | ||
1739 | return -EINVAL; | ||
1740 | } | ||
1741 | |||
1742 | switch (ctx->crypto_type) { | ||
1743 | case ARTPEC6_CRYPTO_CIPHER_AES_ECB: | ||
1744 | oper = regk_crypto_aes_ecb; | ||
1745 | cipher_decr = req_ctx->decrypt; | ||
1746 | break; | ||
1747 | |||
1748 | case ARTPEC6_CRYPTO_CIPHER_AES_CBC: | ||
1749 | oper = regk_crypto_aes_cbc; | ||
1750 | cipher_decr = req_ctx->decrypt; | ||
1751 | break; | ||
1752 | |||
1753 | case ARTPEC6_CRYPTO_CIPHER_AES_CTR: | ||
1754 | oper = regk_crypto_aes_ctr; | ||
1755 | cipher_decr = false; | ||
1756 | break; | ||
1757 | |||
1758 | case ARTPEC6_CRYPTO_CIPHER_AES_XTS: | ||
1759 | oper = regk_crypto_aes_xts; | ||
1760 | cipher_decr = req_ctx->decrypt; | ||
1761 | |||
1762 | if (variant == ARTPEC6_CRYPTO) | ||
1763 | req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ; | ||
1764 | else | ||
1765 | req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ; | ||
1766 | break; | ||
1767 | |||
1768 | default: | ||
1769 | pr_err("%s: Invalid cipher mode %d!\n", | ||
1770 | MODULE_NAME, ctx->crypto_type); | ||
1771 | return -EINVAL; | ||
1772 | } | ||
1773 | |||
1774 | if (variant == ARTPEC6_CRYPTO) { | ||
1775 | req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper); | ||
1776 | req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, | ||
1777 | cipher_len); | ||
1778 | if (cipher_decr) | ||
1779 | req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR; | ||
1780 | } else { | ||
1781 | req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper); | ||
1782 | req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN, | ||
1783 | cipher_len); | ||
1784 | if (cipher_decr) | ||
1785 | req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR; | ||
1786 | } | ||
1787 | |||
1788 | ret = artpec6_crypto_setup_out_descr(common, | ||
1789 | &req_ctx->cipher_md, | ||
1790 | sizeof(req_ctx->cipher_md), | ||
1791 | false, false); | ||
1792 | if (ret) | ||
1793 | return ret; | ||
1794 | |||
1795 | ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); | ||
1796 | if (ret) | ||
1797 | return ret; | ||
1798 | |||
1799 | if (iv_len) { | ||
1800 | ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len, | ||
1801 | false, false); | ||
1802 | if (ret) | ||
1803 | return ret; | ||
1804 | } | ||
1805 | /* Data out */ | ||
1806 | artpec6_crypto_walk_init(&walk, areq->src); | ||
1807 | ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen); | ||
1808 | if (ret) | ||
1809 | return ret; | ||
1810 | |||
1811 | /* Data in */ | ||
1812 | artpec6_crypto_walk_init(&walk, areq->dst); | ||
1813 | ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen); | ||
1814 | if (ret) | ||
1815 | return ret; | ||
1816 | |||
1817 | /* CTR- and XTS-mode padding up to a full block, required by the HW. */ | ||
1818 | if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR || | ||
1819 | ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) { | ||
1820 | size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) - | ||
1821 | areq->cryptlen; | ||
1822 | |||
1823 | if (pad) { | ||
1824 | ret = artpec6_crypto_setup_out_descr(common, | ||
1825 | ac->pad_buffer, | ||
1826 | pad, false, false); | ||
1827 | if (ret) | ||
1828 | return ret; | ||
1829 | |||
1830 | ret = artpec6_crypto_setup_in_descr(common, | ||
1831 | ac->pad_buffer, pad, | ||
1832 | false); | ||
1833 | if (ret) | ||
1834 | return ret; | ||
1835 | } | ||
1836 | } | ||
1837 | |||
1838 | ret = artpec6_crypto_terminate_out_descrs(common); | ||
1839 | if (ret) | ||
1840 | return ret; | ||
1841 | |||
1842 | ret = artpec6_crypto_terminate_in_descrs(common); | ||
1843 | if (ret) | ||
1844 | return ret; | ||
1845 | |||
1846 | return artpec6_crypto_dma_map_descs(common); | ||
1847 | } | ||
1848 | |||
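
For orientation, the descriptor chains built above are not the caller's concern; they are generated when a request arrives through the generic skcipher API. Below is a minimal, hypothetical caller sketch (not part of this patch) driving the cbc(aes) implementation registered further down; it assumes the standard DECLARE_CRYPTO_WAIT()/crypto_wait_req() helpers for waiting synchronously on an asynchronous request:

    static int example_cbc_encrypt(const u8 *key, unsigned int keylen,
                                   u8 *iv, struct scatterlist *sg,
                                   unsigned int len)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (err)
                    goto out_free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, sg, sg, len, iv);

            /* This driver completes asynchronously; wait for the result. */
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
    out_free_tfm:
            crypto_free_skcipher(tfm);
            return err;
    }
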
1849 | static int artpec6_crypto_prepare_aead(struct aead_request *areq) | ||
1850 | { | ||
1851 | size_t count; | ||
1852 | int ret; | ||
1853 | size_t input_length; | ||
1854 | struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm); | ||
1855 | struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); | ||
1856 | struct crypto_aead *cipher = crypto_aead_reqtfm(areq); | ||
1857 | struct artpec6_crypto_req_common *common = &req_ctx->common; | ||
1858 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
1859 | enum artpec6_crypto_variant variant = ac->variant; | ||
1860 | u32 md_cipher_len; | ||
1861 | |||
1862 | artpec6_crypto_init_dma_operation(common); | ||
1863 | |||
1864 | /* Key */ | ||
1865 | if (variant == ARTPEC6_CRYPTO) { | ||
1866 | ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, | ||
1867 | a6_regk_crypto_dlkey); | ||
1868 | } else { | ||
1869 | ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, | ||
1870 | a7_regk_crypto_dlkey); | ||
1871 | } | ||
1872 | ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md, | ||
1873 | sizeof(ctx->key_md), false, false); | ||
1874 | if (ret) | ||
1875 | return ret; | ||
1876 | |||
1877 | ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key, | ||
1878 | ctx->key_length, true, false); | ||
1879 | if (ret) | ||
1880 | return ret; | ||
1881 | |||
1882 | req_ctx->cipher_md = 0; | ||
1883 | |||
1884 | switch (ctx->key_length) { | ||
1885 | case 16: | ||
1886 | md_cipher_len = regk_crypto_key_128; | ||
1887 | break; | ||
1888 | case 24: | ||
1889 | md_cipher_len = regk_crypto_key_192; | ||
1890 | break; | ||
1891 | case 32: | ||
1892 | md_cipher_len = regk_crypto_key_256; | ||
1893 | break; | ||
1894 | default: | ||
1895 | return -EINVAL; | ||
1896 | } | ||
1897 | |||
1898 | if (variant == ARTPEC6_CRYPTO) { | ||
1899 | req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, | ||
1900 | regk_crypto_aes_gcm); | ||
1901 | req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, | ||
1902 | md_cipher_len); | ||
1903 | if (req_ctx->decrypt) | ||
1904 | req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR; | ||
1905 | } else { | ||
1906 | req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, | ||
1907 | regk_crypto_aes_gcm); | ||
1908 | req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN, | ||
1909 | md_cipher_len); | ||
1910 | if (req_ctx->decrypt) | ||
1911 | req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR; | ||
1912 | } | ||
1913 | |||
1914 | ret = artpec6_crypto_setup_out_descr(common, | ||
1915 | (void *) &req_ctx->cipher_md, | ||
1916 | sizeof(req_ctx->cipher_md), false, | ||
1917 | false); | ||
1918 | if (ret) | ||
1919 | return ret; | ||
1920 | |||
1921 | ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); | ||
1922 | if (ret) | ||
1923 | return ret; | ||
1924 | |||
1925 | /* For decryption, cryptlen includes the authentication tag. */ | ||
1926 | input_length = areq->cryptlen; | ||
1927 | if (req_ctx->decrypt) | ||
1928 | input_length -= AES_BLOCK_SIZE; | ||
1929 | |||
1930 | /* Prepare the context buffer */ | ||
1931 | req_ctx->hw_ctx.aad_length_bits = | ||
1932 | __cpu_to_be64(8*areq->assoclen); | ||
1933 | |||
1934 | req_ctx->hw_ctx.text_length_bits = | ||
1935 | __cpu_to_be64(8*input_length); | ||
1936 | |||
1937 | memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); | ||
1938 | // The HW omits the initial increment of the counter field. | ||
1939 | crypto_inc(req_ctx->hw_ctx.J0+12, 4); | ||
1940 | |||
1941 | ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, | ||
1942 | sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); | ||
1943 | if (ret) | ||
1944 | return ret; | ||
1945 | |||
1946 | { | ||
1947 | struct artpec6_crypto_walk walk; | ||
1948 | |||
1949 | artpec6_crypto_walk_init(&walk, areq->src); | ||
1950 | |||
1951 | /* Associated data */ | ||
1952 | count = areq->assoclen; | ||
1953 | ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); | ||
1954 | if (ret) | ||
1955 | return ret; | ||
1956 | |||
1957 | if (!IS_ALIGNED(areq->assoclen, 16)) { | ||
1958 | size_t assoc_pad = 16 - (areq->assoclen % 16); | ||
1959 | /* The HW mandates zero padding here */ | ||
1960 | ret = artpec6_crypto_setup_out_descr(common, | ||
1961 | ac->zero_buffer, | ||
1962 | assoc_pad, false, | ||
1963 | false); | ||
1964 | if (ret) | ||
1965 | return ret; | ||
1966 | } | ||
1967 | |||
1968 | /* Data to crypto */ | ||
1969 | count = input_length; | ||
1970 | ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); | ||
1971 | if (ret) | ||
1972 | return ret; | ||
1973 | |||
1974 | if (!IS_ALIGNED(input_length, 16)) { | ||
1975 | size_t crypto_pad = 16 - (input_length % 16); | ||
1976 | /* The HW mandates zero padding here */ | ||
1977 | ret = artpec6_crypto_setup_out_descr(common, | ||
1978 | ac->zero_buffer, | ||
1979 | crypto_pad, | ||
1980 | false, | ||
1981 | false); | ||
1982 | if (ret) | ||
1983 | return ret; | ||
1984 | } | ||
1985 | } | ||
1986 | |||
1987 | /* Data from crypto */ | ||
1988 | { | ||
1989 | struct artpec6_crypto_walk walk; | ||
1990 | size_t output_len = areq->cryptlen; | ||
1991 | |||
1992 | if (req_ctx->decrypt) | ||
1993 | output_len -= AES_BLOCK_SIZE; | ||
1994 | |||
1995 | artpec6_crypto_walk_init(&walk, areq->dst); | ||
1996 | |||
1997 | /* skip associated data in the output */ | ||
1998 | count = artpec6_crypto_walk_advance(&walk, areq->assoclen); | ||
1999 | if (count) | ||
2000 | return -EINVAL; | ||
2001 | |||
2002 | count = output_len; | ||
2003 | ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count); | ||
2004 | if (ret) | ||
2005 | return ret; | ||
2006 | |||
2007 | /* Put padding between the ciphertext and the auth tag */ | ||
2008 | if (!IS_ALIGNED(output_len, 16)) { | ||
2009 | size_t crypto_pad = 16 - (output_len % 16); | ||
2010 | |||
2011 | ret = artpec6_crypto_setup_in_descr(common, | ||
2012 | ac->pad_buffer, | ||
2013 | crypto_pad, false); | ||
2014 | if (ret) | ||
2015 | return ret; | ||
2016 | } | ||
2017 | |||
2018 | /* The authentication tag shall follow immediately after | ||
2019 | * the output ciphertext. For decryption it is put in a context | ||
2020 | * buffer for later comparison against the input tag. | ||
2021 | */ | ||
2022 | count = AES_BLOCK_SIZE; | ||
2023 | |||
2024 | if (req_ctx->decrypt) { | ||
2025 | ret = artpec6_crypto_setup_in_descr(common, | ||
2026 | req_ctx->decryption_tag, count, false); | ||
2027 | if (ret) | ||
2028 | return ret; | ||
2029 | |||
2030 | } else { | ||
2031 | ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, | ||
2032 | count); | ||
2033 | if (ret) | ||
2034 | return ret; | ||
2035 | } | ||
2036 | |||
2037 | } | ||
2038 | |||
2039 | ret = artpec6_crypto_terminate_in_descrs(common); | ||
2040 | if (ret) | ||
2041 | return ret; | ||
2042 | |||
2043 | ret = artpec6_crypto_terminate_out_descrs(common); | ||
2044 | if (ret) | ||
2045 | return ret; | ||
2046 | |||
2047 | return artpec6_crypto_dma_map_descs(common); | ||
2048 | } | ||
2049 | |||
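
A note on the J0 handling above: crypto_inc() performs an in-place big-endian increment, here advancing the 32-bit counter word at the tail of J0 because the hardware skips GCM's first counter increment. A standalone illustration of the primitive (values are arbitrary):

    /* Illustration only: crypto_inc() is an in-place big-endian +1. */
    static void example_counter_inc(void)
    {
            u8 ctr[4] = { 0x00, 0x00, 0x00, 0xff };

            crypto_inc(ctr, 4);     /* ctr is now 00 00 01 00 */
    }
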
2050 | static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) | ||
2051 | { | ||
2052 | struct artpec6_crypto_req_common *req; | ||
2053 | |||
2054 | while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) { | ||
2055 | req = list_first_entry(&ac->queue, | ||
2056 | struct artpec6_crypto_req_common, | ||
2057 | list); | ||
2058 | list_move_tail(&req->list, &ac->pending); | ||
2059 | artpec6_crypto_start_dma(req); | ||
2060 | |||
2061 | req->req->complete(req->req, -EINPROGRESS); | ||
2062 | } | ||
2063 | |||
2064 | /* | ||
2065 | * In some cases, the hardware can raise an in_eop_flush interrupt | ||
2066 | * before actually updating the status, so we have a timer which will | ||
2067 | * recheck the status on timeout. Since these cases are expected to be | ||
2068 | * very rare, we use a relatively large timeout value. There should be | ||
2069 | * no noticeable negative effect if the timer fires spuriously. | ||
2070 | */ | ||
2071 | if (ac->pending_count) | ||
2072 | mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100)); | ||
2073 | else | ||
2074 | del_timer(&ac->timer); | ||
2075 | } | ||
2076 | |||
2077 | static void artpec6_crypto_timeout(unsigned long data) | ||
2078 | { | ||
2079 | struct artpec6_crypto *ac = (struct artpec6_crypto *) data; | ||
2080 | |||
2081 | dev_info_ratelimited(artpec6_crypto_dev, "timeout\n"); | ||
2082 | |||
2083 | tasklet_schedule(&ac->task); | ||
2084 | } | ||
2085 | |||
2086 | static void artpec6_crypto_task(unsigned long data) | ||
2087 | { | ||
2088 | struct artpec6_crypto *ac = (struct artpec6_crypto *)data; | ||
2089 | struct artpec6_crypto_req_common *req; | ||
2090 | struct artpec6_crypto_req_common *n; | ||
2091 | |||
2092 | if (list_empty(&ac->pending)) { | ||
2093 | pr_debug("Spurious IRQ\n"); | ||
2094 | return; | ||
2095 | } | ||
2096 | |||
2097 | spin_lock_bh(&ac->queue_lock); | ||
2098 | |||
2099 | list_for_each_entry_safe(req, n, &ac->pending, list) { | ||
2100 | struct artpec6_crypto_dma_descriptors *dma = req->dma; | ||
2101 | u32 stat; | ||
2102 | |||
2103 | dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr, | ||
2104 | sizeof(dma->stat[0]), | ||
2105 | DMA_BIDIRECTIONAL); | ||
2106 | |||
2107 | stat = req->dma->stat[req->dma->in_cnt-1]; | ||
2108 | |||
2109 | /* A non-zero final status descriptor indicates | ||
2110 | * this job has finished. | ||
2111 | */ | ||
2112 | pr_debug("Request %p status is %X\n", req, stat); | ||
2113 | if (!stat) | ||
2114 | break; | ||
2115 | |||
2116 | /* Allow testing of timeout handling with fault injection */ | ||
2117 | #ifdef CONFIG_FAULT_INJECTION | ||
2118 | if (should_fail(&artpec6_crypto_fail_status_read, 1)) | ||
2119 | continue; | ||
2120 | #endif | ||
2121 | |||
2122 | pr_debug("Completing request %p\n", req); | ||
2123 | |||
2124 | list_del(&req->list); | ||
2125 | |||
2126 | artpec6_crypto_dma_unmap_all(req); | ||
2127 | artpec6_crypto_copy_bounce_buffers(req); | ||
2128 | |||
2129 | ac->pending_count--; | ||
2130 | artpec6_crypto_common_destroy(req); | ||
2131 | req->complete(req->req); | ||
2132 | } | ||
2133 | |||
2134 | artpec6_crypto_process_queue(ac); | ||
2135 | |||
2136 | spin_unlock_bh(&ac->queue_lock); | ||
2137 | } | ||
2138 | |||
2139 | static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) | ||
2140 | { | ||
2141 | req->complete(req, 0); | ||
2142 | } | ||
2143 | |||
2144 | static void | ||
2145 | artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req) | ||
2146 | { | ||
2147 | struct skcipher_request *cipher_req = container_of(req, | ||
2148 | struct skcipher_request, base); | ||
2149 | |||
2150 | scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src, | ||
2151 | cipher_req->cryptlen - AES_BLOCK_SIZE, | ||
2152 | AES_BLOCK_SIZE, 0); | ||
2153 | req->complete(req, 0); | ||
2154 | } | ||
2155 | |||
2156 | static void | ||
2157 | artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req) | ||
2158 | { | ||
2159 | struct skcipher_request *cipher_req = container_of(req, | ||
2160 | struct skcipher_request, base); | ||
2161 | |||
2162 | scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst, | ||
2163 | cipher_req->cryptlen - AES_BLOCK_SIZE, | ||
2164 | AES_BLOCK_SIZE, 0); | ||
2165 | req->complete(req, 0); | ||
2166 | } | ||
2167 | |||
2168 | static void artpec6_crypto_complete_aead(struct crypto_async_request *req) | ||
2169 | { | ||
2170 | int result = 0; | ||
2171 | |||
2172 | /* Verify the GCM authentication tag. */ | ||
2173 | struct aead_request *areq = container_of(req, | ||
2174 | struct aead_request, base); | ||
2175 | struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); | ||
2176 | |||
2177 | if (req_ctx->decrypt) { | ||
2178 | u8 input_tag[AES_BLOCK_SIZE]; | ||
2179 | |||
2180 | sg_pcopy_to_buffer(areq->src, | ||
2181 | sg_nents(areq->src), | ||
2182 | input_tag, | ||
2183 | AES_BLOCK_SIZE, | ||
2184 | areq->assoclen + areq->cryptlen - | ||
2185 | AES_BLOCK_SIZE); | ||
2186 | |||
2187 | if (memcmp(req_ctx->decryption_tag, | ||
2188 | input_tag, | ||
2189 | AES_BLOCK_SIZE)) { | ||
2190 | pr_debug("***EBADMSG:\n"); | ||
2191 | print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1, | ||
2192 | input_tag, AES_BLOCK_SIZE, true); | ||
2193 | print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1, | ||
2194 | req_ctx->decryption_tag, | ||
2195 | AES_BLOCK_SIZE, true); | ||
2196 | |||
2197 | result = -EBADMSG; | ||
2198 | } | ||
2199 | } | ||
2200 | |||
2201 | req->complete(req, result); | ||
2202 | } | ||
2203 | |||
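
A design note on the tag check above: memcmp() is not constant-time, and kernel crypto code often prefers crypto_memneq() to keep tag comparison free of data-dependent timing. As a hedged alternative with the same semantics (a suggestion, not what this patch does), the comparison in artpec6_crypto_complete_aead() could read:

    if (crypto_memneq(req_ctx->decryption_tag, input_tag, AES_BLOCK_SIZE))
            result = -EBADMSG;      /* constant-time mismatch check */
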
2204 | static void artpec6_crypto_complete_hash(struct crypto_async_request *req) | ||
2205 | { | ||
2206 | req->complete(req, 0); | ||
2207 | } | ||
2208 | |||
2209 | |||
2210 | /*------------------- Hash functions -----------------------------------------*/ | ||
2211 | static int | ||
2212 | artpec6_crypto_hash_set_key(struct crypto_ahash *tfm, | ||
2213 | const u8 *key, unsigned int keylen) | ||
2214 | { | ||
2215 | struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base); | ||
2216 | size_t blocksize; | ||
2217 | int ret; | ||
2218 | |||
2219 | if (!keylen) { | ||
2220 | pr_err("Invalid length (%d) of HMAC key\n", | ||
2221 | keylen); | ||
2222 | return -EINVAL; | ||
2223 | } | ||
2224 | |||
2225 | memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key)); | ||
2226 | |||
2227 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
2228 | |||
2229 | if (keylen > blocksize) { | ||
2230 | SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash); | ||
2231 | |||
2232 | hdesc->tfm = tfm_ctx->child_hash; | ||
2233 | hdesc->flags = crypto_ahash_get_flags(tfm) & | ||
2234 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
2235 | |||
2236 | tfm_ctx->hmac_key_length = blocksize; | ||
2237 | ret = crypto_shash_digest(hdesc, key, keylen, | ||
2238 | tfm_ctx->hmac_key); | ||
2239 | if (ret) | ||
2240 | return ret; | ||
2241 | |||
2242 | } else { | ||
2243 | memcpy(tfm_ctx->hmac_key, key, keylen); | ||
2244 | tfm_ctx->hmac_key_length = keylen; | ||
2245 | } | ||
2246 | |||
2247 | return 0; | ||
2248 | } | ||
2249 | |||
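
The setkey path above follows the RFC 2104 rule: a key longer than the hash block size is replaced by its digest, so the effective key always fits in one block. Callers never see this; a minimal, hypothetical usage sketch through the ahash API:

    /* Hypothetical caller: over-long keys are silently pre-hashed. */
    static int example_hmac_setkey(const u8 *key, unsigned int keylen)
    {
            struct crypto_ahash *tfm;
            int err;

            tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* keylen > 64 (the SHA-256 block size) takes the digest path. */
            err = crypto_ahash_setkey(tfm, key, keylen);

            crypto_free_ahash(tfm);
            return err;
    }
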
2250 | static int | ||
2251 | artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac) | ||
2252 | { | ||
2253 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
2254 | enum artpec6_crypto_variant variant = ac->variant; | ||
2255 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2256 | u32 oper; | ||
2257 | |||
2258 | memset(req_ctx, 0, sizeof(*req_ctx)); | ||
2259 | |||
2260 | req_ctx->hash_flags = HASH_FLAG_INIT_CTX; | ||
2261 | if (hmac) | ||
2262 | req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY); | ||
2263 | |||
2264 | switch (type) { | ||
2265 | case ARTPEC6_CRYPTO_HASH_SHA1: | ||
2266 | oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1; | ||
2267 | break; | ||
2268 | case ARTPEC6_CRYPTO_HASH_SHA256: | ||
2269 | oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256; | ||
2270 | break; | ||
2271 | case ARTPEC6_CRYPTO_HASH_SHA384: | ||
2272 | oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384; | ||
2273 | break; | ||
2274 | case ARTPEC6_CRYPTO_HASH_SHA512: | ||
2275 | oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512; | ||
2276 | break; | ||
2277 | |||
2278 | default: | ||
2279 | pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type); | ||
2280 | return -EINVAL; | ||
2281 | } | ||
2282 | |||
2283 | if (variant == ARTPEC6_CRYPTO) | ||
2284 | req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper); | ||
2285 | else | ||
2286 | req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper); | ||
2287 | |||
2288 | return 0; | ||
2289 | } | ||
2290 | |||
2291 | static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req) | ||
2292 | { | ||
2293 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2294 | int ret; | ||
2295 | |||
2296 | if (!req_ctx->common.dma) { | ||
2297 | ret = artpec6_crypto_common_init(&req_ctx->common, | ||
2298 | &req->base, | ||
2299 | artpec6_crypto_complete_hash, | ||
2300 | NULL, 0); | ||
2301 | |||
2302 | if (ret) | ||
2303 | return ret; | ||
2304 | } | ||
2305 | |||
2306 | ret = artpec6_crypto_prepare_hash(req); | ||
2307 | switch (ret) { | ||
2308 | case ARTPEC6_CRYPTO_PREPARE_HASH_START: | ||
2309 | ret = artpec6_crypto_submit(&req_ctx->common); | ||
2310 | break; | ||
2311 | |||
2312 | case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START: | ||
2313 | ret = 0; | ||
2314 | /* Fallthrough */ | ||
2315 | |||
2316 | default: | ||
2317 | artpec6_crypto_common_destroy(&req_ctx->common); | ||
2318 | break; | ||
2319 | } | ||
2320 | |||
2321 | return ret; | ||
2322 | } | ||
2323 | |||
2324 | static int artpec6_crypto_hash_final(struct ahash_request *req) | ||
2325 | { | ||
2326 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2327 | |||
2328 | req_ctx->hash_flags |= HASH_FLAG_FINALIZE; | ||
2329 | |||
2330 | return artpec6_crypto_prepare_submit_hash(req); | ||
2331 | } | ||
2332 | |||
2333 | static int artpec6_crypto_hash_update(struct ahash_request *req) | ||
2334 | { | ||
2335 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2336 | |||
2337 | req_ctx->hash_flags |= HASH_FLAG_UPDATE; | ||
2338 | |||
2339 | return artpec6_crypto_prepare_submit_hash(req); | ||
2340 | } | ||
2341 | |||
2342 | static int artpec6_crypto_sha1_init(struct ahash_request *req) | ||
2343 | { | ||
2344 | return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0); | ||
2345 | } | ||
2346 | |||
2347 | static int artpec6_crypto_sha1_digest(struct ahash_request *req) | ||
2348 | { | ||
2349 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2350 | |||
2351 | artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0); | ||
2352 | |||
2353 | req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; | ||
2354 | |||
2355 | return artpec6_crypto_prepare_submit_hash(req); | ||
2356 | } | ||
2357 | |||
2358 | static int artpec6_crypto_sha256_init(struct ahash_request *req) | ||
2359 | { | ||
2360 | return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0); | ||
2361 | } | ||
2362 | |||
2363 | static int artpec6_crypto_sha256_digest(struct ahash_request *req) | ||
2364 | { | ||
2365 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2366 | |||
2367 | artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0); | ||
2368 | req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; | ||
2369 | |||
2370 | return artpec6_crypto_prepare_submit_hash(req); | ||
2371 | } | ||
2372 | |||
2373 | static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req) | ||
2374 | { | ||
2375 | return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0); | ||
2376 | } | ||
2377 | |||
2378 | static int __maybe_unused | ||
2379 | artpec6_crypto_sha384_digest(struct ahash_request *req) | ||
2380 | { | ||
2381 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2382 | |||
2383 | artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0); | ||
2384 | req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; | ||
2385 | |||
2386 | return artpec6_crypto_prepare_submit_hash(req); | ||
2387 | } | ||
2388 | |||
2389 | static int artpec6_crypto_sha512_init(struct ahash_request *req) | ||
2390 | { | ||
2391 | return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0); | ||
2392 | } | ||
2393 | |||
2394 | static int artpec6_crypto_sha512_digest(struct ahash_request *req) | ||
2395 | { | ||
2396 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2397 | |||
2398 | artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0); | ||
2399 | req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; | ||
2400 | |||
2401 | return artpec6_crypto_prepare_submit_hash(req); | ||
2402 | } | ||
2403 | |||
2404 | static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req) | ||
2405 | { | ||
2406 | return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); | ||
2407 | } | ||
2408 | |||
2409 | static int __maybe_unused | ||
2410 | artpec6_crypto_hmac_sha384_init(struct ahash_request *req) | ||
2411 | { | ||
2412 | return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1); | ||
2413 | } | ||
2414 | |||
2415 | static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req) | ||
2416 | { | ||
2417 | return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1); | ||
2418 | } | ||
2419 | |||
2420 | static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req) | ||
2421 | { | ||
2422 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2423 | |||
2424 | artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); | ||
2425 | req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; | ||
2426 | |||
2427 | return artpec6_crypto_prepare_submit_hash(req); | ||
2428 | } | ||
2429 | |||
2430 | static int __maybe_unused | ||
2431 | artpec6_crypto_hmac_sha384_digest(struct ahash_request *req) | ||
2432 | { | ||
2433 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2434 | |||
2435 | artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1); | ||
2436 | req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; | ||
2437 | |||
2438 | return artpec6_crypto_prepare_submit_hash(req); | ||
2439 | } | ||
2440 | |||
2441 | static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req) | ||
2442 | { | ||
2443 | struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); | ||
2444 | |||
2445 | artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1); | ||
2446 | req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; | ||
2447 | |||
2448 | return artpec6_crypto_prepare_submit_hash(req); | ||
2449 | } | ||
2450 | |||
2451 | static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm, | ||
2452 | const char *base_hash_name) | ||
2453 | { | ||
2454 | struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); | ||
2455 | |||
2456 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
2457 | sizeof(struct artpec6_hash_request_context)); | ||
2458 | memset(tfm_ctx, 0, sizeof(*tfm_ctx)); | ||
2459 | |||
2460 | if (base_hash_name) { | ||
2461 | struct crypto_shash *child; | ||
2462 | |||
2463 | child = crypto_alloc_shash(base_hash_name, 0, | ||
2464 | CRYPTO_ALG_NEED_FALLBACK); | ||
2465 | |||
2466 | if (IS_ERR(child)) | ||
2467 | return PTR_ERR(child); | ||
2468 | |||
2469 | tfm_ctx->child_hash = child; | ||
2470 | } | ||
2471 | |||
2472 | return 0; | ||
2473 | } | ||
2474 | |||
2475 | static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm) | ||
2476 | { | ||
2477 | return artpec6_crypto_ahash_init_common(tfm, NULL); | ||
2478 | } | ||
2479 | |||
2480 | static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm) | ||
2481 | { | ||
2482 | return artpec6_crypto_ahash_init_common(tfm, "sha256"); | ||
2483 | } | ||
2484 | |||
2485 | static int __maybe_unused | ||
2486 | artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm) | ||
2487 | { | ||
2488 | return artpec6_crypto_ahash_init_common(tfm, "sha384"); | ||
2489 | } | ||
2490 | |||
2491 | static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm) | ||
2492 | { | ||
2493 | return artpec6_crypto_ahash_init_common(tfm, "sha512"); | ||
2494 | } | ||
2495 | |||
2496 | static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm) | ||
2497 | { | ||
2498 | struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); | ||
2499 | |||
2500 | if (tfm_ctx->child_hash) | ||
2501 | crypto_free_shash(tfm_ctx->child_hash); | ||
2502 | |||
2503 | memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key)); | ||
2504 | tfm_ctx->hmac_key_length = 0; | ||
2505 | } | ||
2506 | |||
2507 | static int artpec6_crypto_hash_export(struct ahash_request *req, void *out) | ||
2508 | { | ||
2509 | const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req); | ||
2510 | struct artpec6_hash_export_state *state = out; | ||
2511 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
2512 | enum artpec6_crypto_variant variant = ac->variant; | ||
2513 | |||
2514 | BUILD_BUG_ON(sizeof(state->partial_buffer) != | ||
2515 | sizeof(ctx->partial_buffer)); | ||
2516 | BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate)); | ||
2517 | |||
2518 | state->digcnt = ctx->digcnt; | ||
2519 | state->partial_bytes = ctx->partial_bytes; | ||
2520 | state->hash_flags = ctx->hash_flags; | ||
2521 | |||
2522 | if (variant == ARTPEC6_CRYPTO) | ||
2523 | state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md); | ||
2524 | else | ||
2525 | state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md); | ||
2526 | |||
2527 | memcpy(state->partial_buffer, ctx->partial_buffer, | ||
2528 | sizeof(state->partial_buffer)); | ||
2529 | memcpy(state->digeststate, ctx->digeststate, | ||
2530 | sizeof(state->digeststate)); | ||
2531 | |||
2532 | return 0; | ||
2533 | } | ||
2534 | |||
2535 | static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in) | ||
2536 | { | ||
2537 | struct artpec6_hash_request_context *ctx = ahash_request_ctx(req); | ||
2538 | const struct artpec6_hash_export_state *state = in; | ||
2539 | struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); | ||
2540 | enum artpec6_crypto_variant variant = ac->variant; | ||
2541 | |||
2542 | memset(ctx, 0, sizeof(*ctx)); | ||
2543 | |||
2544 | ctx->digcnt = state->digcnt; | ||
2545 | ctx->partial_bytes = state->partial_bytes; | ||
2546 | ctx->hash_flags = state->hash_flags; | ||
2547 | |||
2548 | if (variant == ARTPEC6_CRYPTO) | ||
2549 | ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper); | ||
2550 | else | ||
2551 | ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper); | ||
2552 | |||
2553 | memcpy(ctx->partial_buffer, state->partial_buffer, | ||
2554 | sizeof(state->partial_buffer)); | ||
2555 | memcpy(ctx->digeststate, state->digeststate, | ||
2556 | sizeof(state->digeststate)); | ||
2557 | |||
2558 | return 0; | ||
2559 | } | ||
2560 | |||
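
export()/import() let a caller freeze a partially hashed stream and resume it later, possibly on a different request: the state blob carries the digest count, the unprocessed partial block and the operation code, as the two functions above show. A minimal, hypothetical round trip:

    /* Hypothetical caller: suspend and resume an in-progress hash. */
    static int example_hash_suspend_resume(struct ahash_request *req,
                                           void *state)
    {
            int err;

            /* state must hold crypto_ahash_statesize(tfm) bytes; for
             * this driver that is sizeof(struct artpec6_hash_export_state).
             */
            err = crypto_ahash_export(req, state);
            if (err)
                    return err;

            /* ...arbitrarily later, potentially on a fresh request... */
            return crypto_ahash_import(req, state);
    }
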
2561 | static int init_crypto_hw(struct artpec6_crypto *ac) | ||
2562 | { | ||
2563 | enum artpec6_crypto_variant variant = ac->variant; | ||
2564 | void __iomem *base = ac->base; | ||
2565 | u32 out_descr_buf_size; | ||
2566 | u32 out_data_buf_size; | ||
2567 | u32 in_data_buf_size; | ||
2568 | u32 in_descr_buf_size; | ||
2569 | u32 in_stat_buf_size; | ||
2570 | u32 in, out; | ||
2571 | |||
2572 | /* | ||
2573 | * The PDMA unit contains 1984 bytes of internal memory for the OUT | ||
2574 | * channels and 1024 bytes for the IN channel. This is an elastic | ||
2575 | * memory for descriptors and data, sized in 64-byte increments; the | ||
2576 | * settings below use both budgets exactly: (16 + 15) * 64 = 1984 and | ||
2577 | * (8 + 4 + 4) * 64 = 1024. Trustzone buffers are not used at this stage. | ||
2578 | */ | ||
2579 | out_data_buf_size = 16; /* 1024 bytes for data */ | ||
2580 | out_descr_buf_size = 15; /* 960 bytes for descriptors */ | ||
2581 | in_data_buf_size = 8; /* 512 bytes for data */ | ||
2582 | in_descr_buf_size = 4; /* 256 bytes for descriptors */ | ||
2583 | in_stat_buf_size = 4; /* 256 bytes for stat descrs */ | ||
2584 | |||
2585 | BUILD_BUG_ON_MSG((out_data_buf_size | ||
2586 | + out_descr_buf_size) * 64 > 1984, | ||
2587 | "Invalid OUT configuration"); | ||
2588 | |||
2589 | BUILD_BUG_ON_MSG((in_data_buf_size | ||
2590 | + in_descr_buf_size | ||
2591 | + in_stat_buf_size) * 64 > 1024, | ||
2592 | "Invalid IN configuration"); | ||
2593 | |||
2594 | in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) | | ||
2595 | FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) | | ||
2596 | FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size); | ||
2597 | |||
2598 | out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) | | ||
2599 | FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size); | ||
2600 | |||
2601 | writel_relaxed(out, base + PDMA_OUT_BUF_CFG); | ||
2602 | writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG); | ||
2603 | |||
2604 | if (variant == ARTPEC6_CRYPTO) { | ||
2605 | writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG); | ||
2606 | writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG); | ||
2607 | writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA | | ||
2608 | A6_PDMA_INTR_MASK_IN_EOP_FLUSH, | ||
2609 | base + A6_PDMA_INTR_MASK); | ||
2610 | } else { | ||
2611 | writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG); | ||
2612 | writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG); | ||
2613 | writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA | | ||
2614 | A7_PDMA_INTR_MASK_IN_EOP_FLUSH, | ||
2615 | base + A7_PDMA_INTR_MASK); | ||
2616 | } | ||
2617 | |||
2618 | return 0; | ||
2619 | } | ||
2620 | |||
2621 | static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac) | ||
2622 | { | ||
2623 | enum artpec6_crypto_variant variant = ac->variant; | ||
2624 | void __iomem *base = ac->base; | ||
2625 | |||
2626 | if (variant == ARTPEC6_CRYPTO) { | ||
2627 | writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD); | ||
2628 | writel_relaxed(0, base + A6_PDMA_IN_CFG); | ||
2629 | writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD); | ||
2630 | } else { | ||
2631 | writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD); | ||
2632 | writel_relaxed(0, base + A7_PDMA_IN_CFG); | ||
2633 | writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD); | ||
2634 | } | ||
2635 | |||
2636 | writel_relaxed(0, base + PDMA_OUT_CFG); | ||
2637 | |||
2638 | } | ||
2639 | |||
2640 | static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id) | ||
2641 | { | ||
2642 | struct artpec6_crypto *ac = dev_id; | ||
2643 | enum artpec6_crypto_variant variant = ac->variant; | ||
2644 | void __iomem *base = ac->base; | ||
2645 | u32 mask_in_data, mask_in_eop_flush; | ||
2646 | u32 in_cmd_flush_stat, in_cmd_reg; | ||
2647 | u32 ack_intr_reg; | ||
2648 | u32 ack = 0; | ||
2649 | u32 intr; | ||
2650 | |||
2651 | if (variant == ARTPEC6_CRYPTO) { | ||
2652 | intr = readl_relaxed(base + A6_PDMA_MASKED_INTR); | ||
2653 | mask_in_data = A6_PDMA_INTR_MASK_IN_DATA; | ||
2654 | mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH; | ||
2655 | in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT; | ||
2656 | in_cmd_reg = A6_PDMA_IN_CMD; | ||
2657 | ack_intr_reg = A6_PDMA_ACK_INTR; | ||
2658 | } else { | ||
2659 | intr = readl_relaxed(base + A7_PDMA_MASKED_INTR); | ||
2660 | mask_in_data = A7_PDMA_INTR_MASK_IN_DATA; | ||
2661 | mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH; | ||
2662 | in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT; | ||
2663 | in_cmd_reg = A7_PDMA_IN_CMD; | ||
2664 | ack_intr_reg = A7_PDMA_ACK_INTR; | ||
2665 | } | ||
2666 | |||
2667 | /* We get two interrupt notifications from each job. | ||
2668 | * The in_data interrupt means all data was sent to memory; we | ||
2669 | * then issue a status flush command to write the per-job | ||
2670 | * status to its status vector. This ensures that the | ||
2671 | * tasklet can detect exactly how many submitted jobs | ||
2672 | * have finished. | ||
2673 | */ | ||
2674 | if (intr & mask_in_data) | ||
2675 | ack |= mask_in_data; | ||
2676 | |||
2677 | if (intr & mask_in_eop_flush) | ||
2678 | ack |= mask_in_eop_flush; | ||
2679 | else | ||
2680 | writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg); | ||
2681 | |||
2682 | writel_relaxed(ack, base + ack_intr_reg); | ||
2683 | |||
2684 | if (intr & mask_in_eop_flush) | ||
2685 | tasklet_schedule(&ac->task); | ||
2686 | |||
2687 | return IRQ_HANDLED; | ||
2688 | } | ||
2689 | |||
2690 | /*------------------- Algorithm definitions ----------------------------------*/ | ||
2691 | |||
2692 | /* Hashes */ | ||
2693 | static struct ahash_alg hash_algos[] = { | ||
2694 | /* SHA-1 */ | ||
2695 | { | ||
2696 | .init = artpec6_crypto_sha1_init, | ||
2697 | .update = artpec6_crypto_hash_update, | ||
2698 | .final = artpec6_crypto_hash_final, | ||
2699 | .digest = artpec6_crypto_sha1_digest, | ||
2700 | .import = artpec6_crypto_hash_import, | ||
2701 | .export = artpec6_crypto_hash_export, | ||
2702 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
2703 | .halg.statesize = sizeof(struct artpec6_hash_export_state), | ||
2704 | .halg.base = { | ||
2705 | .cra_name = "sha1", | ||
2706 | .cra_driver_name = "artpec-sha1", | ||
2707 | .cra_priority = 300, | ||
2708 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
2709 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
2710 | .cra_ctxsize = sizeof(struct artpec6_hashalg_context), | ||
2711 | .cra_alignmask = 3, | ||
2712 | .cra_module = THIS_MODULE, | ||
2713 | .cra_init = artpec6_crypto_ahash_init, | ||
2714 | .cra_exit = artpec6_crypto_ahash_exit, | ||
2715 | } | ||
2716 | }, | ||
2717 | /* SHA-256 */ | ||
2718 | { | ||
2719 | .init = artpec6_crypto_sha256_init, | ||
2720 | .update = artpec6_crypto_hash_update, | ||
2721 | .final = artpec6_crypto_hash_final, | ||
2722 | .digest = artpec6_crypto_sha256_digest, | ||
2723 | .import = artpec6_crypto_hash_import, | ||
2724 | .export = artpec6_crypto_hash_export, | ||
2725 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
2726 | .halg.statesize = sizeof(struct artpec6_hash_export_state), | ||
2727 | .halg.base = { | ||
2728 | .cra_name = "sha256", | ||
2729 | .cra_driver_name = "artpec-sha256", | ||
2730 | .cra_priority = 300, | ||
2731 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
2732 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
2733 | .cra_ctxsize = sizeof(struct artpec6_hashalg_context), | ||
2734 | .cra_alignmask = 3, | ||
2735 | .cra_module = THIS_MODULE, | ||
2736 | .cra_init = artpec6_crypto_ahash_init, | ||
2737 | .cra_exit = artpec6_crypto_ahash_exit, | ||
2738 | } | ||
2739 | }, | ||
2740 | /* HMAC SHA-256 */ | ||
2741 | { | ||
2742 | .init = artpec6_crypto_hmac_sha256_init, | ||
2743 | .update = artpec6_crypto_hash_update, | ||
2744 | .final = artpec6_crypto_hash_final, | ||
2745 | .digest = artpec6_crypto_hmac_sha256_digest, | ||
2746 | .import = artpec6_crypto_hash_import, | ||
2747 | .export = artpec6_crypto_hash_export, | ||
2748 | .setkey = artpec6_crypto_hash_set_key, | ||
2749 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
2750 | .halg.statesize = sizeof(struct artpec6_hash_export_state), | ||
2751 | .halg.base = { | ||
2752 | .cra_name = "hmac(sha256)", | ||
2753 | .cra_driver_name = "artpec-hmac-sha256", | ||
2754 | .cra_priority = 300, | ||
2755 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
2756 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
2757 | .cra_ctxsize = sizeof(struct artpec6_hashalg_context), | ||
2758 | .cra_alignmask = 3, | ||
2759 | .cra_module = THIS_MODULE, | ||
2760 | .cra_init = artpec6_crypto_ahash_init_hmac_sha256, | ||
2761 | .cra_exit = artpec6_crypto_ahash_exit, | ||
2762 | } | ||
2763 | }, | ||
2764 | }; | ||
2765 | |||
2766 | static struct ahash_alg artpec7_hash_algos[] = { | ||
2767 | /* SHA-384 */ | ||
2768 | { | ||
2769 | .init = artpec6_crypto_sha384_init, | ||
2770 | .update = artpec6_crypto_hash_update, | ||
2771 | .final = artpec6_crypto_hash_final, | ||
2772 | .digest = artpec6_crypto_sha384_digest, | ||
2773 | .import = artpec6_crypto_hash_import, | ||
2774 | .export = artpec6_crypto_hash_export, | ||
2775 | .halg.digestsize = SHA384_DIGEST_SIZE, | ||
2776 | .halg.statesize = sizeof(struct artpec6_hash_export_state), | ||
2777 | .halg.base = { | ||
2778 | .cra_name = "sha384", | ||
2779 | .cra_driver_name = "artpec-sha384", | ||
2780 | .cra_priority = 300, | ||
2781 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
2782 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
2783 | .cra_ctxsize = sizeof(struct artpec6_hashalg_context), | ||
2784 | .cra_alignmask = 3, | ||
2785 | .cra_module = THIS_MODULE, | ||
2786 | .cra_init = artpec6_crypto_ahash_init, | ||
2787 | .cra_exit = artpec6_crypto_ahash_exit, | ||
2788 | } | ||
2789 | }, | ||
2790 | /* HMAC SHA-384 */ | ||
2791 | { | ||
2792 | .init = artpec6_crypto_hmac_sha384_init, | ||
2793 | .update = artpec6_crypto_hash_update, | ||
2794 | .final = artpec6_crypto_hash_final, | ||
2795 | .digest = artpec6_crypto_hmac_sha384_digest, | ||
2796 | .import = artpec6_crypto_hash_import, | ||
2797 | .export = artpec6_crypto_hash_export, | ||
2798 | .setkey = artpec6_crypto_hash_set_key, | ||
2799 | .halg.digestsize = SHA384_DIGEST_SIZE, | ||
2800 | .halg.statesize = sizeof(struct artpec6_hash_export_state), | ||
2801 | .halg.base = { | ||
2802 | .cra_name = "hmac(sha384)", | ||
2803 | .cra_driver_name = "artpec-hmac-sha384", | ||
2804 | .cra_priority = 300, | ||
2805 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
2806 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
2807 | .cra_ctxsize = sizeof(struct artpec6_hashalg_context), | ||
2808 | .cra_alignmask = 3, | ||
2809 | .cra_module = THIS_MODULE, | ||
2810 | .cra_init = artpec6_crypto_ahash_init_hmac_sha384, | ||
2811 | .cra_exit = artpec6_crypto_ahash_exit, | ||
2812 | } | ||
2813 | }, | ||
2814 | /* SHA-512 */ | ||
2815 | { | ||
2816 | .init = artpec6_crypto_sha512_init, | ||
2817 | .update = artpec6_crypto_hash_update, | ||
2818 | .final = artpec6_crypto_hash_final, | ||
2819 | .digest = artpec6_crypto_sha512_digest, | ||
2820 | .import = artpec6_crypto_hash_import, | ||
2821 | .export = artpec6_crypto_hash_export, | ||
2822 | .halg.digestsize = SHA512_DIGEST_SIZE, | ||
2823 | .halg.statesize = sizeof(struct artpec6_hash_export_state), | ||
2824 | .halg.base = { | ||
2825 | .cra_name = "sha512", | ||
2826 | .cra_driver_name = "artpec-sha512", | ||
2827 | .cra_priority = 300, | ||
2828 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
2829 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
2830 | .cra_ctxsize = sizeof(struct artpec6_hashalg_context), | ||
2831 | .cra_alignmask = 3, | ||
2832 | .cra_module = THIS_MODULE, | ||
2833 | .cra_init = artpec6_crypto_ahash_init, | ||
2834 | .cra_exit = artpec6_crypto_ahash_exit, | ||
2835 | } | ||
2836 | }, | ||
2837 | /* HMAC SHA-512 */ | ||
2838 | { | ||
2839 | .init = artpec6_crypto_hmac_sha512_init, | ||
2840 | .update = artpec6_crypto_hash_update, | ||
2841 | .final = artpec6_crypto_hash_final, | ||
2842 | .digest = artpec6_crypto_hmac_sha512_digest, | ||
2843 | .import = artpec6_crypto_hash_import, | ||
2844 | .export = artpec6_crypto_hash_export, | ||
2845 | .setkey = artpec6_crypto_hash_set_key, | ||
2846 | .halg.digestsize = SHA512_DIGEST_SIZE, | ||
2847 | .halg.statesize = sizeof(struct artpec6_hash_export_state), | ||
2848 | .halg.base = { | ||
2849 | .cra_name = "hmac(sha512)", | ||
2850 | .cra_driver_name = "artpec-hmac-sha512", | ||
2851 | .cra_priority = 300, | ||
2852 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
2853 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
2854 | .cra_ctxsize = sizeof(struct artpec6_hashalg_context), | ||
2855 | .cra_alignmask = 3, | ||
2856 | .cra_module = THIS_MODULE, | ||
2857 | .cra_init = artpec6_crypto_ahash_init_hmac_sha512, | ||
2858 | .cra_exit = artpec6_crypto_ahash_exit, | ||
2859 | } | ||
2860 | }, | ||
2861 | }; | ||
2862 | |||
2863 | /* Crypto */ | ||
2864 | static struct skcipher_alg crypto_algos[] = { | ||
2865 | /* AES - ECB */ | ||
2866 | { | ||
2867 | .base = { | ||
2868 | .cra_name = "ecb(aes)", | ||
2869 | .cra_driver_name = "artpec6-ecb-aes", | ||
2870 | .cra_priority = 300, | ||
2871 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | | ||
2872 | CRYPTO_ALG_ASYNC, | ||
2873 | .cra_blocksize = AES_BLOCK_SIZE, | ||
2874 | .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), | ||
2875 | .cra_alignmask = 3, | ||
2876 | .cra_module = THIS_MODULE, | ||
2877 | }, | ||
2878 | .min_keysize = AES_MIN_KEY_SIZE, | ||
2879 | .max_keysize = AES_MAX_KEY_SIZE, | ||
2880 | .setkey = artpec6_crypto_cipher_set_key, | ||
2881 | .encrypt = artpec6_crypto_encrypt, | ||
2882 | .decrypt = artpec6_crypto_decrypt, | ||
2883 | .init = artpec6_crypto_aes_ecb_init, | ||
2884 | .exit = artpec6_crypto_aes_exit, | ||
2885 | }, | ||
2886 | /* AES - CTR */ | ||
2887 | { | ||
2888 | .base = { | ||
2889 | .cra_name = "ctr(aes)", | ||
2890 | .cra_driver_name = "artpec6-ctr-aes", | ||
2891 | .cra_priority = 300, | ||
2892 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | | ||
2893 | CRYPTO_ALG_ASYNC | | ||
2894 | CRYPTO_ALG_NEED_FALLBACK, | ||
2895 | .cra_blocksize = 1, | ||
2896 | .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), | ||
2897 | .cra_alignmask = 3, | ||
2898 | .cra_module = THIS_MODULE, | ||
2899 | }, | ||
2900 | .min_keysize = AES_MIN_KEY_SIZE, | ||
2901 | .max_keysize = AES_MAX_KEY_SIZE, | ||
2902 | .ivsize = AES_BLOCK_SIZE, | ||
2903 | .setkey = artpec6_crypto_cipher_set_key, | ||
2904 | .encrypt = artpec6_crypto_ctr_encrypt, | ||
2905 | .decrypt = artpec6_crypto_ctr_decrypt, | ||
2906 | .init = artpec6_crypto_aes_ctr_init, | ||
2907 | .exit = artpec6_crypto_aes_ctr_exit, | ||
2908 | }, | ||
2909 | /* AES - CBC */ | ||
2910 | { | ||
2911 | .base = { | ||
2912 | .cra_name = "cbc(aes)", | ||
2913 | .cra_driver_name = "artpec6-cbc-aes", | ||
2914 | .cra_priority = 300, | ||
2915 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | | ||
2916 | CRYPTO_ALG_ASYNC, | ||
2917 | .cra_blocksize = AES_BLOCK_SIZE, | ||
2918 | .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), | ||
2919 | .cra_alignmask = 3, | ||
2920 | .cra_module = THIS_MODULE, | ||
2921 | }, | ||
2922 | .min_keysize = AES_MIN_KEY_SIZE, | ||
2923 | .max_keysize = AES_MAX_KEY_SIZE, | ||
2924 | .ivsize = AES_BLOCK_SIZE, | ||
2925 | .setkey = artpec6_crypto_cipher_set_key, | ||
2926 | .encrypt = artpec6_crypto_encrypt, | ||
2927 | .decrypt = artpec6_crypto_decrypt, | ||
2928 | .init = artpec6_crypto_aes_cbc_init, | ||
2929 | .exit = artpec6_crypto_aes_exit | ||
2930 | }, | ||
2931 | /* AES - XTS */ | ||
2932 | { | ||
2933 | .base = { | ||
2934 | .cra_name = "xts(aes)", | ||
2935 | .cra_driver_name = "artpec6-xts-aes", | ||
2936 | .cra_priority = 300, | ||
2937 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | | ||
2938 | CRYPTO_ALG_ASYNC, | ||
2939 | .cra_blocksize = 1, | ||
2940 | .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), | ||
2941 | .cra_alignmask = 3, | ||
2942 | .cra_module = THIS_MODULE, | ||
2943 | }, | ||
2944 | .min_keysize = 2*AES_MIN_KEY_SIZE, | ||
2945 | .max_keysize = 2*AES_MAX_KEY_SIZE, | ||
2946 | .ivsize = 16, | ||
2947 | .setkey = artpec6_crypto_xts_set_key, | ||
2948 | .encrypt = artpec6_crypto_encrypt, | ||
2949 | .decrypt = artpec6_crypto_decrypt, | ||
2950 | .init = artpec6_crypto_aes_xts_init, | ||
2951 | .exit = artpec6_crypto_aes_exit, | ||
2952 | }, | ||
2953 | }; | ||
2954 | |||
2955 | static struct aead_alg aead_algos[] = { | ||
2956 | { | ||
2957 | .init = artpec6_crypto_aead_init, | ||
2958 | .setkey = artpec6_crypto_aead_set_key, | ||
2959 | .encrypt = artpec6_crypto_aead_encrypt, | ||
2960 | .decrypt = artpec6_crypto_aead_decrypt, | ||
2961 | .ivsize = AES_BLOCK_SIZE, | ||
2962 | .maxauthsize = AES_BLOCK_SIZE, | ||
2963 | |||
2964 | .base = { | ||
2965 | .cra_name = "gcm(aes)", | ||
2966 | .cra_driver_name = "artpec-gcm-aes", | ||
2967 | .cra_priority = 300, | ||
2968 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | | ||
2969 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
2970 | .cra_blocksize = 1, | ||
2971 | .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), | ||
2972 | .cra_alignmask = 3, | ||
2973 | .cra_module = THIS_MODULE, | ||
2974 | }, | ||
2975 | } | ||
2976 | }; | ||
2977 | |||
2978 | #ifdef CONFIG_DEBUG_FS | ||
2979 | |||
2980 | struct dbgfs_u32 { | ||
2981 | char *name; | ||
2982 | mode_t mode; | ||
2983 | u32 *flag; | ||
2984 | char *desc; | ||
2985 | }; | ||
2986 | |||
2987 | static void artpec6_crypto_init_debugfs(void) | ||
2988 | { | ||
2989 | dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); | ||
2990 | |||
2991 | if (!dbgfs_root || IS_ERR(dbgfs_root)) { | ||
2992 | dbgfs_root = NULL; | ||
2993 | pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME); | ||
2994 | return; | ||
2995 | } | ||
2996 | |||
2997 | #ifdef CONFIG_FAULT_INJECTION | ||
2998 | fault_create_debugfs_attr("fail_status_read", dbgfs_root, | ||
2999 | &artpec6_crypto_fail_status_read); | ||
3000 | |||
3001 | fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root, | ||
3002 | &artpec6_crypto_fail_dma_array_full); | ||
3003 | #endif | ||
3004 | } | ||
3005 | |||
3006 | static void artpec6_crypto_free_debugfs(void) | ||
3007 | { | ||
3008 | if (!dbgfs_root) | ||
3009 | return; | ||
3010 | |||
3011 | debugfs_remove_recursive(dbgfs_root); | ||
3012 | dbgfs_root = NULL; | ||
3013 | } | ||
3014 | #endif | ||
3015 | |||
3016 | static const struct of_device_id artpec6_crypto_of_match[] = { | ||
3017 | { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO }, | ||
3018 | { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO }, | ||
3019 | {} | ||
3020 | }; | ||
3021 | MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match); | ||

static int artpec6_crypto_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum artpec6_crypto_variant variant;
	struct artpec6_crypto *ac;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	struct resource *res;
	int irq;
	int err;

	if (artpec6_crypto_dev)
		return -ENODEV;
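
	/*
	 * The check above enforces a single device instance: the global
	 * artpec6_crypto_dev pointer, assigned further down in probe,
	 * doubles as an "already probed" guard here.
	 */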

	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
	if (!match)
		return -EINVAL;

	variant = (enum artpec6_crypto_variant)match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq; /* propagate -EPROBE_DEFER and friends */

	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	platform_set_drvdata(pdev, ac);
	ac->variant = variant;

	spin_lock_init(&ac->queue_lock);
	INIT_LIST_HEAD(&ac->queue);
	INIT_LIST_HEAD(&ac->pending);
	setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long)ac);

	ac->base = base;

	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
		sizeof(struct artpec6_crypto_dma_descriptors),
		64,
		0,
		NULL);
	if (!ac->dma_cache)
		return -ENOMEM;
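
	/*
	 * The slab above holds one artpec6_crypto_dma_descriptors block
	 * per in-flight request. The third argument to kmem_cache_create()
	 * forces 64-byte alignment on the objects, which keeps the
	 * DMA-mapped descriptor lists from sharing cache lines with
	 * unrelated data (the likely reason for the explicit alignment).
	 */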

#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_init_debugfs();
#endif

	tasklet_init(&ac->task, artpec6_crypto_task,
		     (unsigned long)ac);

	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->pad_buffer)
		return -ENOMEM;
	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);

	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				       GFP_KERNEL);
	if (!ac->zero_buffer)
		return -ENOMEM;
	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
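
	/*
	 * Both buffers above are allocated at twice ARTPEC_CACHE_LINE_MAX
	 * and the pointers then rounded up with PTR_ALIGN(), which
	 * guarantees one full, cache-line-aligned buffer regardless of
	 * where devm_kzalloc() placed the allocation. Overwriting the
	 * returned pointers is fine: devres tracks the allocations
	 * internally, so they are still freed on driver detach.
	 */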

	err = init_crypto_hw(ac);
	if (err)
		goto free_cache;

	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
			       "artpec6-crypto", ac);
	if (err)
		goto disable_hw;

	artpec6_crypto_dev = &pdev->dev;

	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (err) {
		dev_err(dev, "Failed to register ahashes\n");
		goto disable_hw;
	}

	if (variant != ARTPEC6_CRYPTO) {
		err = crypto_register_ahashes(artpec7_hash_algos,
					      ARRAY_SIZE(artpec7_hash_algos));
		if (err) {
			dev_err(dev, "Failed to register ahashes\n");
			goto unregister_ahashes;
		}
	}

	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	if (err) {
		dev_err(dev, "Failed to register ciphers\n");
		goto unregister_a7_ahashes;
	}

	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
	if (err) {
		dev_err(dev, "Failed to register aeads\n");
		goto unregister_algs;
	}

	return 0;

unregister_algs:
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
unregister_a7_ahashes:
	if (variant != ARTPEC6_CRYPTO)
		crypto_unregister_ahashes(artpec7_hash_algos,
					  ARRAY_SIZE(artpec7_hash_algos));
unregister_ahashes:
	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
	/* Clear the single-instance guard so a later probe can retry. */
	artpec6_crypto_dev = NULL;
	artpec6_crypto_disable_hw(ac);
free_cache:
	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
	return err;
}

static int artpec6_crypto_remove(struct platform_device *pdev)
{
	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (ac->variant != ARTPEC6_CRYPTO)
		crypto_unregister_ahashes(artpec7_hash_algos,
					  ARRAY_SIZE(artpec7_hash_algos));
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));

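	/*
	 * Teardown order: disable the tasklet so it cannot run again,
	 * release the IRQ so no new completions can schedule it, then
	 * kill any instance still queued and stop the timeout timer
	 * before powering down the hardware.
	 */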
	tasklet_disable(&ac->task);
	devm_free_irq(&pdev->dev, irq, ac);
	tasklet_kill(&ac->task);
	del_timer_sync(&ac->timer);

	/* Clear the single-instance guard so the device can be re-probed. */
	artpec6_crypto_dev = NULL;

	artpec6_crypto_disable_hw(ac);

	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
	return 0;
}

static struct platform_driver artpec6_crypto_driver = {
	.probe = artpec6_crypto_probe,
	.remove = artpec6_crypto_remove,
	.driver = {
		/* .owner is set by module_platform_driver(); no need here. */
		.name = "artpec6-crypto",
		.of_match_table = artpec6_crypto_of_match,
	},
};

module_platform_driver(artpec6_crypto_driver);
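
/*
 * module_platform_driver() expands to the standard module_init()/
 * module_exit() pair that registers the driver on load and unregisters
 * it on unload, so no explicit init/exit functions are needed here.
 */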

MODULE_AUTHOR("Axis Communications AB");
MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
MODULE_LICENSE("GPL");