aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/crypto
diff options
context:
space:
mode:
authorHoria Geantă <horia.geanta@nxp.com>2016-11-22 08:44:09 -0500
committerHerbert Xu <herbert@gondor.apana.org.au>2016-11-28 08:23:23 -0500
commit8cea7b66b821fd914aa26a2af156604f9ef5f709 (patch)
tree8f5eed83990b83528e02db55221c23b71135b385 /drivers/crypto
parent746f069038961e8a72780c0625acc8accafd28fe (diff)
crypto: caam - refactor encryption descriptors generation
Refactor the generation of the authenc, ablkcipher shared descriptors and export the functionality, such that it can be shared with the upcoming caam/qi (Queue Interface) driver. Signed-off-by: Horia Geantă <horia.geanta@nxp.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--drivers/crypto/caam/Kconfig3
-rw-r--r--drivers/crypto/caam/Makefile1
-rw-r--r--drivers/crypto/caam/caamalg.c1109
-rw-r--r--drivers/crypto/caam/caamalg_desc.c1302
-rw-r--r--drivers/crypto/caam/caamalg_desc.h97
5 files changed, 1440 insertions, 1072 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ebeada75ab2d..da24c5752c06 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -134,3 +134,6 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
134 help 134 help
135 Selecting this will enable printing of various debug 135 Selecting this will enable printing of various debug
136 information in the CAAM driver. 136 information in the CAAM driver.
137
138config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
139 def_bool CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 08bf5515ae8a..6554742f357e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@ endif
8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o 9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o 14obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 9cb95f5b2eb3..78b0b7c17205 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,6 +2,7 @@
2 * caam - Freescale FSL CAAM support for crypto API 2 * caam - Freescale FSL CAAM support for crypto API
3 * 3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc. 4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright 2016 NXP
5 * 6 *
6 * Based on talitos crypto API driver. 7 * Based on talitos crypto API driver.
7 * 8 *
@@ -53,6 +54,7 @@
53#include "error.h" 54#include "error.h"
54#include "sg_sw_sec4.h" 55#include "sg_sw_sec4.h"
55#include "key_gen.h" 56#include "key_gen.h"
57#include "caamalg_desc.h"
56 58
57/* 59/*
58 * crypto alg 60 * crypto alg
@@ -69,37 +71,6 @@
69#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ 71#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
70 CAAM_CMD_SZ * 5) 72 CAAM_CMD_SZ * 5)
71 73
72/* length of descriptors text */
73#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
74#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
75#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
76#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
77
78/* Note: Nonce is counted in enckeylen */
79#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
80
81#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
82#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
83#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
84
85#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
86#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
87#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
88
89#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
90#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
91#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
92
93#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
94#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
95#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
96
97#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
98#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
99 20 * CAAM_CMD_SZ)
100#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
101 15 * CAAM_CMD_SZ)
102
103#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) 74#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
104#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 75#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
105 76
@@ -159,52 +130,6 @@ struct caam_aead_alg {
159 bool registered; 130 bool registered;
160}; 131};
161 132
162/* Set DK bit in class 1 operation if shared */
163static inline void append_dec_op1(u32 *desc, u32 type)
164{
165 u32 *jump_cmd, *uncond_jump_cmd;
166
167 /* DK bit is valid only for AES */
168 if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
169 append_operation(desc, type | OP_ALG_AS_INITFINAL |
170 OP_ALG_DECRYPT);
171 return;
172 }
173
174 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
175 append_operation(desc, type | OP_ALG_AS_INITFINAL |
176 OP_ALG_DECRYPT);
177 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
178 set_jump_tgt_here(desc, jump_cmd);
179 append_operation(desc, type | OP_ALG_AS_INITFINAL |
180 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
181 set_jump_tgt_here(desc, uncond_jump_cmd);
182}
183
184/*
185 * For aead functions, read payload and write payload,
186 * both of which are specified in req->src and req->dst
187 */
188static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
189{
190 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
191 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
192 KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
193}
194
195/*
196 * For ablkcipher encrypt and decrypt, read from req->src and
197 * write to req->dst
198 */
199static inline void ablkcipher_append_src_dst(u32 *desc)
200{
201 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
202 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
203 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
204 KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
205 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
206}
207
208/* 133/*
209 * per-session context 134 * per-session context
210 */ 135 */
@@ -223,66 +148,10 @@ struct caam_ctx {
223 unsigned int authsize; 148 unsigned int authsize;
224}; 149};
225 150
226static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
227 bool is_rfc3686)
228{
229 u32 *key_jump_cmd;
230 unsigned int enckeylen = ctx->cdata.keylen;
231
232 /* Note: Context registers are saved. */
233 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
234
235 /* Skip if already shared */
236 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
237 JUMP_COND_SHRD);
238
239 /*
240 * RFC3686 specific:
241 * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
242 * | enckeylen = encryption key size + nonce size
243 */
244 if (is_rfc3686)
245 enckeylen -= CTR_RFC3686_NONCE_SIZE;
246
247 if (ctx->adata.key_inline)
248 append_key_as_imm(desc, (void *)ctx->adata.key,
249 ctx->adata.keylen_pad, ctx->adata.keylen,
250 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
251 else
252 append_key(desc, ctx->adata.key, ctx->adata.keylen, CLASS_2 |
253 KEY_DEST_MDHA_SPLIT | KEY_ENC);
254
255 if (ctx->cdata.key_inline)
256 append_key_as_imm(desc, (void *)ctx->cdata.key, enckeylen,
257 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
258 else
259 append_key(desc, ctx->cdata.key, enckeylen, CLASS_1 |
260 KEY_DEST_CLASS_REG);
261
262 /* Load Counter into CONTEXT1 reg */
263 if (is_rfc3686) {
264 u32 *nonce;
265
266 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
267 enckeylen);
268 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
269 LDST_CLASS_IND_CCB |
270 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
271 append_move(desc,
272 MOVE_SRC_OUTFIFO |
273 MOVE_DEST_CLASS1CTX |
274 (16 << MOVE_OFFSET_SHIFT) |
275 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
276 }
277
278 set_jump_tgt_here(desc, key_jump_cmd);
279}
280
281static int aead_null_set_sh_desc(struct crypto_aead *aead) 151static int aead_null_set_sh_desc(struct crypto_aead *aead)
282{ 152{
283 struct caam_ctx *ctx = crypto_aead_ctx(aead); 153 struct caam_ctx *ctx = crypto_aead_ctx(aead);
284 struct device *jrdev = ctx->jrdev; 154 struct device *jrdev = ctx->jrdev;
285 u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
286 u32 *desc; 155 u32 *desc;
287 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - 156 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
288 ctx->adata.keylen_pad; 157 ctx->adata.keylen_pad;
@@ -301,58 +170,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
301 170
302 /* aead_encrypt shared descriptor */ 171 /* aead_encrypt shared descriptor */
303 desc = ctx->sh_desc_enc; 172 desc = ctx->sh_desc_enc;
304 173 cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
305 init_sh_desc(desc, HDR_SHARE_SERIAL);
306
307 /* Skip if already shared */
308 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
309 JUMP_COND_SHRD);
310 if (ctx->adata.key_inline)
311 append_key_as_imm(desc, (void *)ctx->adata.key,
312 ctx->adata.keylen_pad, ctx->adata.keylen,
313 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
314 else
315 append_key(desc, ctx->adata.key, ctx->adata.keylen, CLASS_2 |
316 KEY_DEST_MDHA_SPLIT | KEY_ENC);
317 set_jump_tgt_here(desc, key_jump_cmd);
318
319 /* assoclen + cryptlen = seqinlen */
320 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
321
322 /* Prepare to read and write cryptlen + assoclen bytes */
323 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
324 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
325
326 /*
327 * MOVE_LEN opcode is not available in all SEC HW revisions,
328 * thus need to do some magic, i.e. self-patch the descriptor
329 * buffer.
330 */
331 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
332 MOVE_DEST_MATH3 |
333 (0x6 << MOVE_LEN_SHIFT));
334 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
335 MOVE_DEST_DESCBUF |
336 MOVE_WAITCOMP |
337 (0x8 << MOVE_LEN_SHIFT));
338
339 /* Class 2 operation */
340 append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
341 OP_ALG_ENCRYPT);
342
343 /* Read and write cryptlen bytes */
344 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
345
346 set_move_tgt_here(desc, read_move_cmd);
347 set_move_tgt_here(desc, write_move_cmd);
348 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
349 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
350 MOVE_AUX_LS);
351
352 /* Write ICV */
353 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
354 LDST_SRCDST_BYTE_CONTEXT);
355
356 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 174 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
357 desc_bytes(desc), 175 desc_bytes(desc),
358 DMA_TO_DEVICE); 176 DMA_TO_DEVICE);
@@ -360,12 +178,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
360 dev_err(jrdev, "unable to map shared descriptor\n"); 178 dev_err(jrdev, "unable to map shared descriptor\n");
361 return -ENOMEM; 179 return -ENOMEM;
362 } 180 }
363#ifdef DEBUG
364 print_hex_dump(KERN_ERR,
365 "aead null enc shdesc@"__stringify(__LINE__)": ",
366 DUMP_PREFIX_ADDRESS, 16, 4, desc,
367 desc_bytes(desc), 1);
368#endif
369 181
370 /* 182 /*
371 * Job Descriptor and Shared Descriptors 183 * Job Descriptor and Shared Descriptors
@@ -379,68 +191,9 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
379 ctx->adata.key = ctx->key_dma; 191 ctx->adata.key = ctx->key_dma;
380 } 192 }
381 193
382 desc = ctx->sh_desc_dec;
383
384 /* aead_decrypt shared descriptor */ 194 /* aead_decrypt shared descriptor */
385 init_sh_desc(desc, HDR_SHARE_SERIAL); 195 desc = ctx->sh_desc_dec;
386 196 cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
387 /* Skip if already shared */
388 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
389 JUMP_COND_SHRD);
390 if (ctx->adata.key_inline)
391 append_key_as_imm(desc, (void *)ctx->adata.key,
392 ctx->adata.keylen_pad, ctx->adata.keylen,
393 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
394 else
395 append_key(desc, ctx->adata.key, ctx->adata.keylen, CLASS_2 |
396 KEY_DEST_MDHA_SPLIT | KEY_ENC);
397 set_jump_tgt_here(desc, key_jump_cmd);
398
399 /* Class 2 operation */
400 append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
401 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
402
403 /* assoclen + cryptlen = seqoutlen */
404 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
405
406 /* Prepare to read and write cryptlen + assoclen bytes */
407 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
408 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
409
410 /*
411 * MOVE_LEN opcode is not available in all SEC HW revisions,
412 * thus need to do some magic, i.e. self-patch the descriptor
413 * buffer.
414 */
415 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
416 MOVE_DEST_MATH2 |
417 (0x6 << MOVE_LEN_SHIFT));
418 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
419 MOVE_DEST_DESCBUF |
420 MOVE_WAITCOMP |
421 (0x8 << MOVE_LEN_SHIFT));
422
423 /* Read and write cryptlen bytes */
424 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
425
426 /*
427 * Insert a NOP here, since we need at least 4 instructions between
428 * code patching the descriptor buffer and the location being patched.
429 */
430 jump_cmd = append_jump(desc, JUMP_TEST_ALL);
431 set_jump_tgt_here(desc, jump_cmd);
432
433 set_move_tgt_here(desc, read_move_cmd);
434 set_move_tgt_here(desc, write_move_cmd);
435 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
436 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
437 MOVE_AUX_LS);
438 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
439
440 /* Load ICV */
441 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
442 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
443
444 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 197 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
445 desc_bytes(desc), 198 desc_bytes(desc),
446 DMA_TO_DEVICE); 199 DMA_TO_DEVICE);
@@ -448,12 +201,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
448 dev_err(jrdev, "unable to map shared descriptor\n"); 201 dev_err(jrdev, "unable to map shared descriptor\n");
449 return -ENOMEM; 202 return -ENOMEM;
450 } 203 }
451#ifdef DEBUG
452 print_hex_dump(KERN_ERR,
453 "aead null dec shdesc@"__stringify(__LINE__)": ",
454 DUMP_PREFIX_ADDRESS, 16, 4, desc,
455 desc_bytes(desc), 1);
456#endif
457 204
458 return 0; 205 return 0;
459} 206}
@@ -465,9 +212,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
465 unsigned int ivsize = crypto_aead_ivsize(aead); 212 unsigned int ivsize = crypto_aead_ivsize(aead);
466 struct caam_ctx *ctx = crypto_aead_ctx(aead); 213 struct caam_ctx *ctx = crypto_aead_ctx(aead);
467 struct device *jrdev = ctx->jrdev; 214 struct device *jrdev = ctx->jrdev;
468 u32 geniv, moveiv;
469 u32 ctx1_iv_off = 0; 215 u32 ctx1_iv_off = 0;
470 u32 *desc; 216 u32 *desc, *nonce = NULL;
471 u32 inl_mask; 217 u32 inl_mask;
472 unsigned int data_len[2]; 218 unsigned int data_len[2];
473 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 219 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
@@ -493,8 +239,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
493 * RFC3686 specific: 239 * RFC3686 specific:
494 * CONTEXT1[255:128] = {NONCE, IV, COUNTER} 240 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
495 */ 241 */
496 if (is_rfc3686) 242 if (is_rfc3686) {
497 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 243 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
244 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
245 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
246 }
498 247
499 data_len[0] = ctx->adata.keylen_pad; 248 data_len[0] = ctx->adata.keylen_pad;
500 data_len[1] = ctx->cdata.keylen; 249 data_len[1] = ctx->cdata.keylen;
@@ -527,45 +276,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
527 276
528 /* aead_encrypt shared descriptor */ 277 /* aead_encrypt shared descriptor */
529 desc = ctx->sh_desc_enc; 278 desc = ctx->sh_desc_enc;
530 279 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
531 /* Note: Context registers are saved. */ 280 is_rfc3686, nonce, ctx1_iv_off);
532 init_sh_desc_key_aead(desc, ctx, is_rfc3686);
533
534 /* Class 2 operation */
535 append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
536 OP_ALG_ENCRYPT);
537
538 /* Read and write assoclen bytes */
539 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
540 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
541
542 /* Skip assoc data */
543 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
544
545 /* read assoc before reading payload */
546 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
547 FIFOLDST_VLF);
548
549 /* Load Counter into CONTEXT1 reg */
550 if (is_rfc3686)
551 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
552 LDST_SRCDST_BYTE_CONTEXT |
553 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
554 LDST_OFFSET_SHIFT));
555
556 /* Class 1 operation */
557 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
558 OP_ALG_ENCRYPT);
559
560 /* Read and write cryptlen bytes */
561 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
562 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
563 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
564
565 /* Write ICV */
566 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
567 LDST_SRCDST_BYTE_CONTEXT);
568
569 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 281 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
570 desc_bytes(desc), 282 desc_bytes(desc),
571 DMA_TO_DEVICE); 283 DMA_TO_DEVICE);
@@ -573,11 +285,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
573 dev_err(jrdev, "unable to map shared descriptor\n"); 285 dev_err(jrdev, "unable to map shared descriptor\n");
574 return -ENOMEM; 286 return -ENOMEM;
575 } 287 }
576#ifdef DEBUG
577 print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
578 DUMP_PREFIX_ADDRESS, 16, 4, desc,
579 desc_bytes(desc), 1);
580#endif
581 288
582skip_enc: 289skip_enc:
583 /* 290 /*
@@ -605,59 +312,9 @@ skip_enc:
605 312
606 /* aead_decrypt shared descriptor */ 313 /* aead_decrypt shared descriptor */
607 desc = ctx->sh_desc_dec; 314 desc = ctx->sh_desc_dec;
608 315 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
609 /* Note: Context registers are saved. */ 316 ctx->authsize, alg->caam.geniv, is_rfc3686,
610 init_sh_desc_key_aead(desc, ctx, is_rfc3686); 317 nonce, ctx1_iv_off);
611
612 /* Class 2 operation */
613 append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
614 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
615
616 /* Read and write assoclen bytes */
617 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
618 if (alg->caam.geniv)
619 append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
620 else
621 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
622
623 /* Skip assoc data */
624 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
625
626 /* read assoc before reading payload */
627 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
628 KEY_VLF);
629
630 if (alg->caam.geniv) {
631 append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
632 LDST_SRCDST_BYTE_CONTEXT |
633 (ctx1_iv_off << LDST_OFFSET_SHIFT));
634 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
635 (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
636 }
637
638 /* Load Counter into CONTEXT1 reg */
639 if (is_rfc3686)
640 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
641 LDST_SRCDST_BYTE_CONTEXT |
642 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
643 LDST_OFFSET_SHIFT));
644
645 /* Choose operation */
646 if (ctr_mode)
647 append_operation(desc, ctx->cdata.algtype |
648 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
649 else
650 append_dec_op1(desc, ctx->cdata.algtype);
651
652 /* Read and write cryptlen bytes */
653 append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
654 append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
655 aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
656
657 /* Load ICV */
658 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
659 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
660
661 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 318 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
662 desc_bytes(desc), 319 desc_bytes(desc),
663 DMA_TO_DEVICE); 320 DMA_TO_DEVICE);
@@ -665,11 +322,6 @@ skip_enc:
665 dev_err(jrdev, "unable to map shared descriptor\n"); 322 dev_err(jrdev, "unable to map shared descriptor\n");
666 return -ENOMEM; 323 return -ENOMEM;
667 } 324 }
668#ifdef DEBUG
669 print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
670 DUMP_PREFIX_ADDRESS, 16, 4, desc,
671 desc_bytes(desc), 1);
672#endif
673 325
674 if (!alg->caam.geniv) 326 if (!alg->caam.geniv)
675 goto skip_givenc; 327 goto skip_givenc;
@@ -699,83 +351,9 @@ skip_enc:
699 351
700 /* aead_givencrypt shared descriptor */ 352 /* aead_givencrypt shared descriptor */
701 desc = ctx->sh_desc_enc; 353 desc = ctx->sh_desc_enc;
702 354 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
703 /* Note: Context registers are saved. */ 355 ctx->authsize, is_rfc3686, nonce,
704 init_sh_desc_key_aead(desc, ctx, is_rfc3686); 356 ctx1_iv_off);
705
706 if (is_rfc3686)
707 goto copy_iv;
708
709 /* Generate IV */
710 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
711 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
712 NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
713 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
714 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
715 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
716 append_move(desc, MOVE_WAITCOMP |
717 MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
718 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
719 (ivsize << MOVE_LEN_SHIFT));
720 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
721
722copy_iv:
723 /* Copy IV to class 1 context */
724 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
725 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
726 (ivsize << MOVE_LEN_SHIFT));
727
728 /* Return to encryption */
729 append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
730 OP_ALG_ENCRYPT);
731
732 /* Read and write assoclen bytes */
733 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
734 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
735
736 /* Skip assoc data */
737 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
738
739 /* read assoc before reading payload */
740 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
741 KEY_VLF);
742
743 /* Copy iv from outfifo to class 2 fifo */
744 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
745 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
746 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
747 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
748 append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
749 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
750
751 /* Load Counter into CONTEXT1 reg */
752 if (is_rfc3686)
753 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
754 LDST_SRCDST_BYTE_CONTEXT |
755 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
756 LDST_OFFSET_SHIFT));
757
758 /* Class 1 operation */
759 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
760 OP_ALG_ENCRYPT);
761
762 /* Will write ivsize + cryptlen */
763 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
764
765 /* Not need to reload iv */
766 append_seq_fifo_load(desc, ivsize,
767 FIFOLD_CLASS_SKIP);
768
769 /* Will read cryptlen */
770 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
771 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
772 FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
773 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
774
775 /* Write ICV */
776 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
777 LDST_SRCDST_BYTE_CONTEXT);
778
779 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 357 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
780 desc_bytes(desc), 358 desc_bytes(desc),
781 DMA_TO_DEVICE); 359 DMA_TO_DEVICE);
@@ -783,11 +361,6 @@ copy_iv:
783 dev_err(jrdev, "unable to map shared descriptor\n"); 361 dev_err(jrdev, "unable to map shared descriptor\n");
784 return -ENOMEM; 362 return -ENOMEM;
785 } 363 }
786#ifdef DEBUG
787 print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
788 DUMP_PREFIX_ADDRESS, 16, 4, desc,
789 desc_bytes(desc), 1);
790#endif
791 364
792skip_givenc: 365skip_givenc:
793 return 0; 366 return 0;
@@ -808,8 +381,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
808{ 381{
809 struct caam_ctx *ctx = crypto_aead_ctx(aead); 382 struct caam_ctx *ctx = crypto_aead_ctx(aead);
810 struct device *jrdev = ctx->jrdev; 383 struct device *jrdev = ctx->jrdev;
811 u32 *key_jump_cmd, *zero_payload_jump_cmd,
812 *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
813 u32 *desc; 384 u32 *desc;
814 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 385 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
815 ctx->cdata.keylen; 386 ctx->cdata.keylen;
@@ -831,78 +402,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
831 } 402 }
832 403
833 desc = ctx->sh_desc_enc; 404 desc = ctx->sh_desc_enc;
834 405 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
835 init_sh_desc(desc, HDR_SHARE_SERIAL);
836
837 /* skip key loading if they are loaded due to sharing */
838 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
839 JUMP_COND_SHRD | JUMP_COND_SELF);
840 if (ctx->cdata.key_inline)
841 append_key_as_imm(desc, (void *)ctx->cdata.key,
842 ctx->cdata.keylen, ctx->cdata.keylen,
843 CLASS_1 | KEY_DEST_CLASS_REG);
844 else
845 append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
846 KEY_DEST_CLASS_REG);
847 set_jump_tgt_here(desc, key_jump_cmd);
848
849 /* class 1 operation */
850 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
851 OP_ALG_ENCRYPT);
852
853 /* if assoclen + cryptlen is ZERO, skip to ICV write */
854 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
855 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
856 JUMP_COND_MATH_Z);
857
858 /* if assoclen is ZERO, skip reading the assoc data */
859 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
860 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
861 JUMP_COND_MATH_Z);
862
863 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
864
865 /* skip assoc data */
866 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
867
868 /* cryptlen = seqinlen - assoclen */
869 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
870
871 /* if cryptlen is ZERO jump to zero-payload commands */
872 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
873 JUMP_COND_MATH_Z);
874
875 /* read assoc data */
876 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
877 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
878 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
879
880 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
881
882 /* write encrypted data */
883 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
884
885 /* read payload data */
886 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
887 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
888
889 /* jump the zero-payload commands */
890 append_jump(desc, JUMP_TEST_ALL | 2);
891
892 /* zero-payload commands */
893 set_jump_tgt_here(desc, zero_payload_jump_cmd);
894
895 /* read assoc data */
896 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
897 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
898
899 /* There is no input data */
900 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
901
902 /* write ICV */
903 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
904 LDST_SRCDST_BYTE_CONTEXT);
905
906 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 406 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
907 desc_bytes(desc), 407 desc_bytes(desc),
908 DMA_TO_DEVICE); 408 DMA_TO_DEVICE);
@@ -910,11 +410,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
910 dev_err(jrdev, "unable to map shared descriptor\n"); 410 dev_err(jrdev, "unable to map shared descriptor\n");
911 return -ENOMEM; 411 return -ENOMEM;
912 } 412 }
913#ifdef DEBUG
914 print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
915 DUMP_PREFIX_ADDRESS, 16, 4, desc,
916 desc_bytes(desc), 1);
917#endif
918 413
919 /* 414 /*
920 * Job Descriptor and Shared Descriptors 415 * Job Descriptor and Shared Descriptors
@@ -929,65 +424,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
929 } 424 }
930 425
931 desc = ctx->sh_desc_dec; 426 desc = ctx->sh_desc_dec;
932 427 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
933 init_sh_desc(desc, HDR_SHARE_SERIAL);
934
935 /* skip key loading if they are loaded due to sharing */
936 key_jump_cmd = append_jump(desc, JUMP_JSL |
937 JUMP_TEST_ALL | JUMP_COND_SHRD |
938 JUMP_COND_SELF);
939 if (ctx->cdata.key_inline)
940 append_key_as_imm(desc, (void *)ctx->cdata.key,
941 ctx->cdata.keylen, ctx->cdata.keylen,
942 CLASS_1 | KEY_DEST_CLASS_REG);
943 else
944 append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
945 KEY_DEST_CLASS_REG);
946 set_jump_tgt_here(desc, key_jump_cmd);
947
948 /* class 1 operation */
949 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
950 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
951
952 /* if assoclen is ZERO, skip reading the assoc data */
953 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
954 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
955 JUMP_COND_MATH_Z);
956
957 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
958
959 /* skip assoc data */
960 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
961
962 /* read assoc data */
963 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
964 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
965
966 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
967
968 /* cryptlen = seqoutlen - assoclen */
969 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
970
971 /* jump to zero-payload command if cryptlen is zero */
972 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
973 JUMP_COND_MATH_Z);
974
975 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
976
977 /* store encrypted data */
978 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
979
980 /* read payload data */
981 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
982 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
983
984 /* zero-payload command */
985 set_jump_tgt_here(desc, zero_payload_jump_cmd);
986
987 /* read ICV */
988 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
989 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
990
991 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 428 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
992 desc_bytes(desc), 429 desc_bytes(desc),
993 DMA_TO_DEVICE); 430 DMA_TO_DEVICE);
@@ -995,11 +432,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
995 dev_err(jrdev, "unable to map shared descriptor\n"); 432 dev_err(jrdev, "unable to map shared descriptor\n");
996 return -ENOMEM; 433 return -ENOMEM;
997 } 434 }
998#ifdef DEBUG
999 print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
1000 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1001 desc_bytes(desc), 1);
1002#endif
1003 435
1004 return 0; 436 return 0;
1005} 437}
@@ -1018,7 +450,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
1018{ 450{
1019 struct caam_ctx *ctx = crypto_aead_ctx(aead); 451 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1020 struct device *jrdev = ctx->jrdev; 452 struct device *jrdev = ctx->jrdev;
1021 u32 *key_jump_cmd;
1022 u32 *desc; 453 u32 *desc;
1023 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 454 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1024 ctx->cdata.keylen; 455 ctx->cdata.keylen;
@@ -1040,58 +471,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
1040 } 471 }
1041 472
1042 desc = ctx->sh_desc_enc; 473 desc = ctx->sh_desc_enc;
1043 474 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
1044 init_sh_desc(desc, HDR_SHARE_SERIAL);
1045
1046 /* Skip key loading if it is loaded due to sharing */
1047 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1048 JUMP_COND_SHRD);
1049 if (ctx->cdata.key_inline)
1050 append_key_as_imm(desc, (void *)ctx->cdata.key,
1051 ctx->cdata.keylen, ctx->cdata.keylen,
1052 CLASS_1 | KEY_DEST_CLASS_REG);
1053 else
1054 append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
1055 KEY_DEST_CLASS_REG);
1056 set_jump_tgt_here(desc, key_jump_cmd);
1057
1058 /* Class 1 operation */
1059 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
1060 OP_ALG_ENCRYPT);
1061
1062 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
1063 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1064
1065 /* Read assoc data */
1066 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1067 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1068
1069 /* Skip IV */
1070 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1071
1072 /* Will read cryptlen bytes */
1073 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1074
1075 /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
1076 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
1077
1078 /* Skip assoc data */
1079 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
1080
1081 /* cryptlen = seqoutlen - assoclen */
1082 append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
1083
1084 /* Write encrypted data */
1085 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1086
1087 /* Read payload data */
1088 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1089 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
1090
1091 /* Write ICV */
1092 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1093 LDST_SRCDST_BYTE_CONTEXT);
1094
1095 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 475 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1096 desc_bytes(desc), 476 desc_bytes(desc),
1097 DMA_TO_DEVICE); 477 DMA_TO_DEVICE);
@@ -1099,11 +479,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
1099 dev_err(jrdev, "unable to map shared descriptor\n"); 479 dev_err(jrdev, "unable to map shared descriptor\n");
1100 return -ENOMEM; 480 return -ENOMEM;
1101 } 481 }
1102#ifdef DEBUG
1103 print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
1104 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1105 desc_bytes(desc), 1);
1106#endif
1107 482
1108 /* 483 /*
1109 * Job Descriptor and Shared Descriptors 484 * Job Descriptor and Shared Descriptors
@@ -1118,58 +493,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
1118 } 493 }
1119 494
1120 desc = ctx->sh_desc_dec; 495 desc = ctx->sh_desc_dec;
1121 496 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
1122 init_sh_desc(desc, HDR_SHARE_SERIAL);
1123
1124 /* Skip key loading if it is loaded due to sharing */
1125 key_jump_cmd = append_jump(desc, JUMP_JSL |
1126 JUMP_TEST_ALL | JUMP_COND_SHRD);
1127 if (ctx->cdata.key_inline)
1128 append_key_as_imm(desc, (void *)ctx->cdata.key,
1129 ctx->cdata.keylen, ctx->cdata.keylen,
1130 CLASS_1 | KEY_DEST_CLASS_REG);
1131 else
1132 append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
1133 KEY_DEST_CLASS_REG);
1134 set_jump_tgt_here(desc, key_jump_cmd);
1135
1136 /* Class 1 operation */
1137 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
1138 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1139
1140 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
1141 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1142
1143 /* Read assoc data */
1144 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1145 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1146
1147 /* Skip IV */
1148 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1149
1150 /* Will read cryptlen bytes */
1151 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
1152
1153 /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
1154 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
1155
1156 /* Skip assoc data */
1157 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
1158
1159 /* Will write cryptlen bytes */
1160 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1161
1162 /* Store payload data */
1163 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1164
1165 /* Read encrypted data */
1166 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1167 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
1168
1169 /* Read ICV */
1170 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1171 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1172
1173 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 497 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1174 desc_bytes(desc), 498 desc_bytes(desc),
1175 DMA_TO_DEVICE); 499 DMA_TO_DEVICE);
@@ -1177,11 +501,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
1177 dev_err(jrdev, "unable to map shared descriptor\n"); 501 dev_err(jrdev, "unable to map shared descriptor\n");
1178 return -ENOMEM; 502 return -ENOMEM;
1179 } 503 }
1180#ifdef DEBUG
1181 print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
1182 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1183 desc_bytes(desc), 1);
1184#endif
1185 504
1186 return 0; 505 return 0;
1187} 506}
@@ -1201,8 +520,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1201{ 520{
1202 struct caam_ctx *ctx = crypto_aead_ctx(aead); 521 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1203 struct device *jrdev = ctx->jrdev; 522 struct device *jrdev = ctx->jrdev;
1204 u32 *key_jump_cmd;
1205 u32 *read_move_cmd, *write_move_cmd;
1206 u32 *desc; 523 u32 *desc;
1207 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 524 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1208 ctx->cdata.keylen; 525 ctx->cdata.keylen;
@@ -1224,57 +541,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1224 } 541 }
1225 542
1226 desc = ctx->sh_desc_enc; 543 desc = ctx->sh_desc_enc;
1227 544 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
1228 init_sh_desc(desc, HDR_SHARE_SERIAL);
1229
1230 /* Skip key loading if it is loaded due to sharing */
1231 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1232 JUMP_COND_SHRD);
1233 if (ctx->cdata.key_inline)
1234 append_key_as_imm(desc, (void *)ctx->cdata.key,
1235 ctx->cdata.keylen, ctx->cdata.keylen,
1236 CLASS_1 | KEY_DEST_CLASS_REG);
1237 else
1238 append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
1239 KEY_DEST_CLASS_REG);
1240 set_jump_tgt_here(desc, key_jump_cmd);
1241
1242 /* Class 1 operation */
1243 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
1244 OP_ALG_ENCRYPT);
1245
1246 /* assoclen + cryptlen = seqinlen */
1247 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
1248
1249 /*
1250 * MOVE_LEN opcode is not available in all SEC HW revisions,
1251 * thus need to do some magic, i.e. self-patch the descriptor
1252 * buffer.
1253 */
1254 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1255 (0x6 << MOVE_LEN_SHIFT));
1256 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1257 (0x8 << MOVE_LEN_SHIFT));
1258
1259 /* Will read assoclen + cryptlen bytes */
1260 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1261
1262 /* Will write assoclen + cryptlen bytes */
1263 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1264
1265 /* Read and write assoclen + cryptlen bytes */
1266 aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1267
1268 set_move_tgt_here(desc, read_move_cmd);
1269 set_move_tgt_here(desc, write_move_cmd);
1270 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1271 /* Move payload data to OFIFO */
1272 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1273
1274 /* Write ICV */
1275 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1276 LDST_SRCDST_BYTE_CONTEXT);
1277
1278 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 545 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1279 desc_bytes(desc), 546 desc_bytes(desc),
1280 DMA_TO_DEVICE); 547 DMA_TO_DEVICE);
@@ -1282,11 +549,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1282 dev_err(jrdev, "unable to map shared descriptor\n"); 549 dev_err(jrdev, "unable to map shared descriptor\n");
1283 return -ENOMEM; 550 return -ENOMEM;
1284 } 551 }
1285#ifdef DEBUG
1286 print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1287 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1288 desc_bytes(desc), 1);
1289#endif
1290 552
1291 /* 553 /*
1292 * Job Descriptor and Shared Descriptors 554 * Job Descriptor and Shared Descriptors
@@ -1301,62 +563,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1301 } 563 }
1302 564
1303 desc = ctx->sh_desc_dec; 565 desc = ctx->sh_desc_dec;
1304 566 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
1305 init_sh_desc(desc, HDR_SHARE_SERIAL);
1306
1307 /* Skip key loading if it is loaded due to sharing */
1308 key_jump_cmd = append_jump(desc, JUMP_JSL |
1309 JUMP_TEST_ALL | JUMP_COND_SHRD);
1310 if (ctx->cdata.key_inline)
1311 append_key_as_imm(desc, (void *)ctx->cdata.key,
1312 ctx->cdata.keylen, ctx->cdata.keylen,
1313 CLASS_1 | KEY_DEST_CLASS_REG);
1314 else
1315 append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
1316 KEY_DEST_CLASS_REG);
1317 set_jump_tgt_here(desc, key_jump_cmd);
1318
1319 /* Class 1 operation */
1320 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
1321 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1322
1323 /* assoclen + cryptlen = seqoutlen */
1324 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1325
1326 /*
1327 * MOVE_LEN opcode is not available in all SEC HW revisions,
1328 * thus need to do some magic, i.e. self-patch the descriptor
1329 * buffer.
1330 */
1331 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1332 (0x6 << MOVE_LEN_SHIFT));
1333 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1334 (0x8 << MOVE_LEN_SHIFT));
1335
1336 /* Will read assoclen + cryptlen bytes */
1337 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1338
1339 /* Will write assoclen + cryptlen bytes */
1340 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1341
1342 /* Store payload data */
1343 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1344
1345 /* In-snoop assoclen + cryptlen data */
1346 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1347 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1348
1349 set_move_tgt_here(desc, read_move_cmd);
1350 set_move_tgt_here(desc, write_move_cmd);
1351 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1352 /* Move payload data to OFIFO */
1353 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1354 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1355
1356 /* Read ICV */
1357 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1358 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1359
1360 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 567 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1361 desc_bytes(desc), 568 desc_bytes(desc),
1362 DMA_TO_DEVICE); 569 DMA_TO_DEVICE);
@@ -1364,11 +571,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1364 dev_err(jrdev, "unable to map shared descriptor\n"); 571 dev_err(jrdev, "unable to map shared descriptor\n");
1365 return -ENOMEM; 572 return -ENOMEM;
1366 } 573 }
1367#ifdef DEBUG
1368 print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1369 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1370 desc_bytes(desc), 1);
1371#endif
1372 574
1373 return 0; 575 return 0;
1374} 576}
@@ -1569,21 +771,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1569 const u8 *key, unsigned int keylen) 771 const u8 *key, unsigned int keylen)
1570{ 772{
1571 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 773 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1572 struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1573 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); 774 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1574 const char *alg_name = crypto_tfm_alg_name(tfm); 775 const char *alg_name = crypto_tfm_alg_name(tfm);
1575 struct device *jrdev = ctx->jrdev; 776 struct device *jrdev = ctx->jrdev;
1576 int ret = 0; 777 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1577 u32 *key_jump_cmd;
1578 u32 *desc; 778 u32 *desc;
1579 u8 *nonce;
1580 u32 geniv;
1581 u32 ctx1_iv_off = 0; 779 u32 ctx1_iv_off = 0;
1582 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 780 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1583 OP_ALG_AAI_CTR_MOD128); 781 OP_ALG_AAI_CTR_MOD128);
1584 const bool is_rfc3686 = (ctr_mode && 782 const bool is_rfc3686 = (ctr_mode &&
1585 (strstr(alg_name, "rfc3686") != NULL)); 783 (strstr(alg_name, "rfc3686") != NULL));
1586 784
785 memcpy(ctx->key, key, keylen);
1587#ifdef DEBUG 786#ifdef DEBUG
1588 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 787 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1589 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 788 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -1606,7 +805,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1606 keylen -= CTR_RFC3686_NONCE_SIZE; 805 keylen -= CTR_RFC3686_NONCE_SIZE;
1607 } 806 }
1608 807
1609 memcpy(ctx->key, key, keylen);
1610 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, 808 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1611 DMA_TO_DEVICE); 809 DMA_TO_DEVICE);
1612 if (dma_mapping_error(jrdev, ctx->key_dma)) { 810 if (dma_mapping_error(jrdev, ctx->key_dma)) {
@@ -1619,48 +817,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1619 817
1620 /* ablkcipher_encrypt shared descriptor */ 818 /* ablkcipher_encrypt shared descriptor */
1621 desc = ctx->sh_desc_enc; 819 desc = ctx->sh_desc_enc;
1622 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); 820 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
1623 /* Skip if already shared */ 821 ctx1_iv_off);
1624 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1625 JUMP_COND_SHRD);
1626
1627 /* Load class1 key only */
1628 append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
1629 ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1630
1631 /* Load nonce into CONTEXT1 reg */
1632 if (is_rfc3686) {
1633 nonce = (u8 *)key + keylen;
1634 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1635 LDST_CLASS_IND_CCB |
1636 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1637 append_move(desc, MOVE_WAITCOMP |
1638 MOVE_SRC_OUTFIFO |
1639 MOVE_DEST_CLASS1CTX |
1640 (16 << MOVE_OFFSET_SHIFT) |
1641 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1642 }
1643
1644 set_jump_tgt_here(desc, key_jump_cmd);
1645
1646 /* Load iv */
1647 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1648 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1649
1650 /* Load counter into CONTEXT1 reg */
1651 if (is_rfc3686)
1652 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1653 LDST_SRCDST_BYTE_CONTEXT |
1654 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1655 LDST_OFFSET_SHIFT));
1656
1657 /* Load operation */
1658 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
1659 OP_ALG_ENCRYPT);
1660
1661 /* Perform operation */
1662 ablkcipher_append_src_dst(desc);
1663
1664 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 822 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1665 desc_bytes(desc), 823 desc_bytes(desc),
1666 DMA_TO_DEVICE); 824 DMA_TO_DEVICE);
@@ -1668,60 +826,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1668 dev_err(jrdev, "unable to map shared descriptor\n"); 826 dev_err(jrdev, "unable to map shared descriptor\n");
1669 return -ENOMEM; 827 return -ENOMEM;
1670 } 828 }
1671#ifdef DEBUG 829
1672 print_hex_dump(KERN_ERR,
1673 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
1674 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1675 desc_bytes(desc), 1);
1676#endif
1677 /* ablkcipher_decrypt shared descriptor */ 830 /* ablkcipher_decrypt shared descriptor */
1678 desc = ctx->sh_desc_dec; 831 desc = ctx->sh_desc_dec;
1679 832 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
1680 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); 833 ctx1_iv_off);
1681 /* Skip if already shared */
1682 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1683 JUMP_COND_SHRD);
1684
1685 /* Load class1 key only */
1686 append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
1687 ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1688
1689 /* Load nonce into CONTEXT1 reg */
1690 if (is_rfc3686) {
1691 nonce = (u8 *)key + keylen;
1692 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1693 LDST_CLASS_IND_CCB |
1694 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1695 append_move(desc, MOVE_WAITCOMP |
1696 MOVE_SRC_OUTFIFO |
1697 MOVE_DEST_CLASS1CTX |
1698 (16 << MOVE_OFFSET_SHIFT) |
1699 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1700 }
1701
1702 set_jump_tgt_here(desc, key_jump_cmd);
1703
1704 /* load IV */
1705 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1706 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1707
1708 /* Load counter into CONTEXT1 reg */
1709 if (is_rfc3686)
1710 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1711 LDST_SRCDST_BYTE_CONTEXT |
1712 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1713 LDST_OFFSET_SHIFT));
1714
1715 /* Choose operation */
1716 if (ctr_mode)
1717 append_operation(desc, ctx->cdata.algtype |
1718 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1719 else
1720 append_dec_op1(desc, ctx->cdata.algtype);
1721
1722 /* Perform operation */
1723 ablkcipher_append_src_dst(desc);
1724
1725 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 834 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1726 desc_bytes(desc), 835 desc_bytes(desc),
1727 DMA_TO_DEVICE); 836 DMA_TO_DEVICE);
@@ -1730,75 +839,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1730 return -ENOMEM; 839 return -ENOMEM;
1731 } 840 }
1732 841
1733#ifdef DEBUG
1734 print_hex_dump(KERN_ERR,
1735 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
1736 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1737 desc_bytes(desc), 1);
1738#endif
1739 /* ablkcipher_givencrypt shared descriptor */ 842 /* ablkcipher_givencrypt shared descriptor */
1740 desc = ctx->sh_desc_givenc; 843 desc = ctx->sh_desc_givenc;
1741 844 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
1742 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); 845 ctx1_iv_off);
1743 /* Skip if already shared */
1744 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1745 JUMP_COND_SHRD);
1746
1747 /* Load class1 key only */
1748 append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
1749 ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1750
1751 /* Load Nonce into CONTEXT1 reg */
1752 if (is_rfc3686) {
1753 nonce = (u8 *)key + keylen;
1754 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1755 LDST_CLASS_IND_CCB |
1756 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1757 append_move(desc, MOVE_WAITCOMP |
1758 MOVE_SRC_OUTFIFO |
1759 MOVE_DEST_CLASS1CTX |
1760 (16 << MOVE_OFFSET_SHIFT) |
1761 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1762 }
1763 set_jump_tgt_here(desc, key_jump_cmd);
1764
1765 /* Generate IV */
1766 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1767 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1768 NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1769 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1770 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1771 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1772 append_move(desc, MOVE_WAITCOMP |
1773 MOVE_SRC_INFIFO |
1774 MOVE_DEST_CLASS1CTX |
1775 (crt->ivsize << MOVE_LEN_SHIFT) |
1776 (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1777 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1778
1779 /* Copy generated IV to memory */
1780 append_seq_store(desc, crt->ivsize,
1781 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1782 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1783
1784 /* Load Counter into CONTEXT1 reg */
1785 if (is_rfc3686)
1786 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1787 LDST_SRCDST_BYTE_CONTEXT |
1788 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1789 LDST_OFFSET_SHIFT));
1790
1791 if (ctx1_iv_off)
1792 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1793 (1 << JUMP_OFFSET_SHIFT));
1794
1795 /* Load operation */
1796 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
1797 OP_ALG_ENCRYPT);
1798
1799 /* Perform operation */
1800 ablkcipher_append_src_dst(desc);
1801
1802 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, 846 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1803 desc_bytes(desc), 847 desc_bytes(desc),
1804 DMA_TO_DEVICE); 848 DMA_TO_DEVICE);
@@ -1806,14 +850,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1806 dev_err(jrdev, "unable to map shared descriptor\n"); 850 dev_err(jrdev, "unable to map shared descriptor\n");
1807 return -ENOMEM; 851 return -ENOMEM;
1808 } 852 }
1809#ifdef DEBUG
1810 print_hex_dump(KERN_ERR,
1811 "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1812 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1813 desc_bytes(desc), 1);
1814#endif
1815 853
1816 return ret; 854 return 0;
1817} 855}
1818 856
1819static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 857static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1821,8 +859,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1821{ 859{
1822 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 860 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1823 struct device *jrdev = ctx->jrdev; 861 struct device *jrdev = ctx->jrdev;
1824 u32 *key_jump_cmd, *desc; 862 u32 *desc;
1825 __be64 sector_size = cpu_to_be64(512);
1826 863
1827 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 864 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1828 crypto_ablkcipher_set_flags(ablkcipher, 865 crypto_ablkcipher_set_flags(ablkcipher,
@@ -1843,84 +880,17 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1843 880
1844 /* xts_ablkcipher_encrypt shared descriptor */ 881 /* xts_ablkcipher_encrypt shared descriptor */
1845 desc = ctx->sh_desc_enc; 882 desc = ctx->sh_desc_enc;
1846 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); 883 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
1847 /* Skip if already shared */
1848 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1849 JUMP_COND_SHRD);
1850
1851 /* Load class1 keys only */
1852 append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
1853 ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1854
1855 /* Load sector size with index 40 bytes (0x28) */
1856 append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
1857 LDST_SRCDST_BYTE_CONTEXT |
1858 (0x28 << LDST_OFFSET_SHIFT));
1859
1860 set_jump_tgt_here(desc, key_jump_cmd);
1861
1862 /*
1863 * create sequence for loading the sector index
1864 * Upper 8B of IV - will be used as sector index
1865 * Lower 8B of IV - will be discarded
1866 */
1867 append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1868 (0x20 << LDST_OFFSET_SHIFT));
1869 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1870
1871 /* Load operation */
1872 append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
1873 OP_ALG_ENCRYPT);
1874
1875 /* Perform operation */
1876 ablkcipher_append_src_dst(desc);
1877
1878 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 884 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1879 DMA_TO_DEVICE); 885 DMA_TO_DEVICE);
1880 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 886 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1881 dev_err(jrdev, "unable to map shared descriptor\n"); 887 dev_err(jrdev, "unable to map shared descriptor\n");
1882 return -ENOMEM; 888 return -ENOMEM;
1883 } 889 }
1884#ifdef DEBUG
1885 print_hex_dump(KERN_ERR,
1886 "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1887 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1888#endif
1889 890
1890 /* xts_ablkcipher_decrypt shared descriptor */ 891 /* xts_ablkcipher_decrypt shared descriptor */
1891 desc = ctx->sh_desc_dec; 892 desc = ctx->sh_desc_dec;
1892 893 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
1893 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1894 /* Skip if already shared */
1895 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1896 JUMP_COND_SHRD);
1897
1898 /* Load class1 key only */
1899 append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
1900 ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1901
1902 /* Load sector size with index 40 bytes (0x28) */
1903 append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
1904 LDST_SRCDST_BYTE_CONTEXT |
1905 (0x28 << LDST_OFFSET_SHIFT));
1906
1907 set_jump_tgt_here(desc, key_jump_cmd);
1908
1909 /*
1910 * create sequence for loading the sector index
1911 * Upper 8B of IV - will be used as sector index
1912 * Lower 8B of IV - will be discarded
1913 */
1914 append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1915 (0x20 << LDST_OFFSET_SHIFT));
1916 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1917
1918 /* Load operation */
1919 append_dec_op1(desc, ctx->cdata.algtype);
1920
1921 /* Perform operation */
1922 ablkcipher_append_src_dst(desc);
1923
1924 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 894 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1925 DMA_TO_DEVICE); 895 DMA_TO_DEVICE);
1926 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 896 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
@@ -1929,11 +899,6 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1929 dev_err(jrdev, "unable to map shared descriptor\n"); 899 dev_err(jrdev, "unable to map shared descriptor\n");
1930 return -ENOMEM; 900 return -ENOMEM;
1931 } 901 }
1932#ifdef DEBUG
1933 print_hex_dump(KERN_ERR,
1934 "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1935 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1936#endif
1937 902
1938 return 0; 903 return 0;
1939} 904}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 000000000000..fa2479d9da24
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1302 @@
1/*
2 * Shared descriptors for aead, ablkcipher algorithms
3 *
4 * Copyright 2016 NXP
5 */
6
7#include "compat.h"
8#include "desc_constr.h"
9#include "caamalg_desc.h"
10
11/*
12 * For aead functions, read payload and write payload,
13 * both of which are specified in req->src and req->dst
14 */
15static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
16{
17 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
18 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
19 KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
20}
21
22/* Set DK bit in class 1 operation if shared */
23static inline void append_dec_op1(u32 *desc, u32 type)
24{
25 u32 *jump_cmd, *uncond_jump_cmd;
26
27 /* DK bit is valid only for AES */
28 if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
29 append_operation(desc, type | OP_ALG_AS_INITFINAL |
30 OP_ALG_DECRYPT);
31 return;
32 }
33
34 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
35 append_operation(desc, type | OP_ALG_AS_INITFINAL |
36 OP_ALG_DECRYPT);
37 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
38 set_jump_tgt_here(desc, jump_cmd);
39 append_operation(desc, type | OP_ALG_AS_INITFINAL |
40 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
41 set_jump_tgt_here(desc, uncond_jump_cmd);
42}
43
44/**
45 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
46 * (non-protocol) with no (null) encryption.
47 * @desc: pointer to buffer used for descriptor construction
48 * @adata: pointer to authentication transform definitions. Note that since a
49 * split key is to be used, the size of the split key itself is
50 * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
51 * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
52 * @icvsize: integrity check value (ICV) size (truncated or full)
53 *
54 * Note: Requires an MDHA split key.
55 */
56void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
57 unsigned int icvsize)
58{
59 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
60
61 init_sh_desc(desc, HDR_SHARE_SERIAL);
62
63 /* Skip if already shared */
64 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
65 JUMP_COND_SHRD);
66 if (adata->key_inline)
67 append_key_as_imm(desc, (void *)adata->key, adata->keylen_pad,
68 adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
69 KEY_ENC);
70 else
71 append_key(desc, adata->key, adata->keylen, CLASS_2 |
72 KEY_DEST_MDHA_SPLIT | KEY_ENC);
73 set_jump_tgt_here(desc, key_jump_cmd);
74
75 /* assoclen + cryptlen = seqinlen */
76 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
77
78 /* Prepare to read and write cryptlen + assoclen bytes */
79 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
80 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
81
82 /*
83 * MOVE_LEN opcode is not available in all SEC HW revisions,
84 * thus need to do some magic, i.e. self-patch the descriptor
85 * buffer.
86 */
87 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
88 MOVE_DEST_MATH3 |
89 (0x6 << MOVE_LEN_SHIFT));
90 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
91 MOVE_DEST_DESCBUF |
92 MOVE_WAITCOMP |
93 (0x8 << MOVE_LEN_SHIFT));
94
95 /* Class 2 operation */
96 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
97 OP_ALG_ENCRYPT);
98
99 /* Read and write cryptlen bytes */
100 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
101
102 set_move_tgt_here(desc, read_move_cmd);
103 set_move_tgt_here(desc, write_move_cmd);
104 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
105 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
106 MOVE_AUX_LS);
107
108 /* Write ICV */
109 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
110 LDST_SRCDST_BYTE_CONTEXT);
111
112#ifdef DEBUG
113 print_hex_dump(KERN_ERR,
114 "aead null enc shdesc@" __stringify(__LINE__)": ",
115 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
116#endif
117}
118EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
119
120/**
121 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
122 * (non-protocol) with no (null) decryption.
123 * @desc: pointer to buffer used for descriptor construction
124 * @adata: pointer to authentication transform definitions. Note that since a
125 * split key is to be used, the size of the split key itself is
126 * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
127 * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
128 * @icvsize: integrity check value (ICV) size (truncated or full)
129 *
130 * Note: Requires an MDHA split key.
131 */
132void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
133 unsigned int icvsize)
134{
135 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
136
137 init_sh_desc(desc, HDR_SHARE_SERIAL);
138
139 /* Skip if already shared */
140 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
141 JUMP_COND_SHRD);
142 if (adata->key_inline)
143 append_key_as_imm(desc, (void *)adata->key, adata->keylen_pad,
144 adata->keylen, CLASS_2 |
145 KEY_DEST_MDHA_SPLIT | KEY_ENC);
146 else
147 append_key(desc, adata->key, adata->keylen, CLASS_2 |
148 KEY_DEST_MDHA_SPLIT | KEY_ENC);
149 set_jump_tgt_here(desc, key_jump_cmd);
150
151 /* Class 2 operation */
152 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
153 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
154
155 /* assoclen + cryptlen = seqoutlen */
156 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
157
158 /* Prepare to read and write cryptlen + assoclen bytes */
159 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
160 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
161
162 /*
163 * MOVE_LEN opcode is not available in all SEC HW revisions,
164 * thus need to do some magic, i.e. self-patch the descriptor
165 * buffer.
166 */
167 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
168 MOVE_DEST_MATH2 |
169 (0x6 << MOVE_LEN_SHIFT));
170 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
171 MOVE_DEST_DESCBUF |
172 MOVE_WAITCOMP |
173 (0x8 << MOVE_LEN_SHIFT));
174
175 /* Read and write cryptlen bytes */
176 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
177
178 /*
 179	 * Insert a NOP here, since we need at least 4 instructions between
 180	 * the code patching the descriptor buffer and the location being patched.
181 */
182 jump_cmd = append_jump(desc, JUMP_TEST_ALL);
183 set_jump_tgt_here(desc, jump_cmd);
184
185 set_move_tgt_here(desc, read_move_cmd);
186 set_move_tgt_here(desc, write_move_cmd);
187 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
188 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
189 MOVE_AUX_LS);
190 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
191
192 /* Load ICV */
193 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
194 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
195
196#ifdef DEBUG
197 print_hex_dump(KERN_ERR,
198 "aead null dec shdesc@" __stringify(__LINE__)": ",
199 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
200#endif
201}
202EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
203
204static void init_sh_desc_key_aead(u32 * const desc,
205 struct alginfo * const cdata,
206 struct alginfo * const adata,
207 const bool is_rfc3686, u32 *nonce)
208{
209 u32 *key_jump_cmd;
210 unsigned int enckeylen = cdata->keylen;
211
212 /* Note: Context registers are saved. */
213 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
214
215 /* Skip if already shared */
216 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
217 JUMP_COND_SHRD);
218
219 /*
220 * RFC3686 specific:
221 * | key = {AUTH_KEY, ENC_KEY, NONCE}
222 * | enckeylen = encryption key size + nonce size
223 */
224 if (is_rfc3686)
225 enckeylen -= CTR_RFC3686_NONCE_SIZE;
226
227 if (adata->key_inline)
228 append_key_as_imm(desc, (void *)adata->key, adata->keylen_pad,
229 adata->keylen, CLASS_2 |
230 KEY_DEST_MDHA_SPLIT | KEY_ENC);
231 else
232 append_key(desc, adata->key, adata->keylen, CLASS_2 |
233 KEY_DEST_MDHA_SPLIT | KEY_ENC);
234
235 if (cdata->key_inline)
236 append_key_as_imm(desc, (void *)cdata->key, enckeylen,
237 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
238 else
239 append_key(desc, cdata->key, enckeylen, CLASS_1 |
240 KEY_DEST_CLASS_REG);
241
242 /* Load Counter into CONTEXT1 reg */
243 if (is_rfc3686) {
244 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
245 LDST_CLASS_IND_CCB |
246 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
247 append_move(desc,
248 MOVE_SRC_OUTFIFO |
249 MOVE_DEST_CLASS1CTX |
250 (16 << MOVE_OFFSET_SHIFT) |
251 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
252 }
253
254 set_jump_tgt_here(desc, key_jump_cmd);
255}
256
257/**
258 * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
259 * (non-protocol).
260 * @desc: pointer to buffer used for descriptor construction
261 * @cdata: pointer to block cipher transform definitions
262 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
263 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
264 * @adata: pointer to authentication transform definitions. Note that since a
265 * split key is to be used, the size of the split key itself is
266 * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
267 * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
268 * @icvsize: integrity check value (ICV) size (truncated or full)
269 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
270 * @nonce: pointer to rfc3686 nonce
271 * @ctx1_iv_off: IV offset in CONTEXT1 register
272 *
273 * Note: Requires an MDHA split key.
274 */
275void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
276 struct alginfo *adata, unsigned int icvsize,
277 const bool is_rfc3686, u32 *nonce,
278 const u32 ctx1_iv_off)
279{
280 /* Note: Context registers are saved. */
281 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
282
283 /* Class 2 operation */
284 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
285 OP_ALG_ENCRYPT);
286
287 /* Read and write assoclen bytes */
288 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
289 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
290
291 /* Skip assoc data */
292 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
293
294 /* read assoc before reading payload */
295 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
296 FIFOLDST_VLF);
297
298 /* Load Counter into CONTEXT1 reg */
299 if (is_rfc3686)
300 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
301 LDST_SRCDST_BYTE_CONTEXT |
302 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
303 LDST_OFFSET_SHIFT));
304
305 /* Class 1 operation */
306 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
307 OP_ALG_ENCRYPT);
308
309 /* Read and write cryptlen bytes */
310 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
311 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
312 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
313
314 /* Write ICV */
315 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
316 LDST_SRCDST_BYTE_CONTEXT);
317
318#ifdef DEBUG
319 print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
320 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
321#endif
322}
323EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
324
325/**
326 * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
327 * (non-protocol).
328 * @desc: pointer to buffer used for descriptor construction
329 * @cdata: pointer to block cipher transform definitions
330 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
331 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
332 * @adata: pointer to authentication transform definitions. Note that since a
333 * split key is to be used, the size of the split key itself is
334 * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
335 * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
336 * @ivsize: initialization vector size
337 * @icvsize: integrity check value (ICV) size (truncated or full)
338 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
339 * @nonce: pointer to rfc3686 nonce
340 * @ctx1_iv_off: IV offset in CONTEXT1 register
341 *
342 * Note: Requires an MDHA split key.
343 */
344void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
345 struct alginfo *adata, unsigned int ivsize,
346 unsigned int icvsize, const bool geniv,
347 const bool is_rfc3686, u32 *nonce,
348 const u32 ctx1_iv_off)
349{
350 /* Note: Context registers are saved. */
351 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
352
353 /* Class 2 operation */
354 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
355 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
356
357 /* Read and write assoclen bytes */
358 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
359 if (geniv)
360 append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
361 else
362 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
363
364 /* Skip assoc data */
365 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
366
367 /* read assoc before reading payload */
368 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
369 KEY_VLF);
370
371 if (geniv) {
372 append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
373 LDST_SRCDST_BYTE_CONTEXT |
374 (ctx1_iv_off << LDST_OFFSET_SHIFT));
375 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
376 (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
377 }
378
379 /* Load Counter into CONTEXT1 reg */
380 if (is_rfc3686)
381 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
382 LDST_SRCDST_BYTE_CONTEXT |
383 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
384 LDST_OFFSET_SHIFT));
385
386 /* Choose operation */
387 if (ctx1_iv_off)
388 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
389 OP_ALG_DECRYPT);
390 else
391 append_dec_op1(desc, cdata->algtype);
392
393 /* Read and write cryptlen bytes */
394 append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
395 append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
396 aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
397
398 /* Load ICV */
399 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
400 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
401
402#ifdef DEBUG
403 print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
404 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
405#endif
406}
407EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
408
409/**
410 * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
411 * (non-protocol) with HW-generated initialization
412 * vector.
413 * @desc: pointer to buffer used for descriptor construction
414 * @cdata: pointer to block cipher transform definitions
415 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
416 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
417 * @adata: pointer to authentication transform definitions. Note that since a
418 * split key is to be used, the size of the split key itself is
419 * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
420 * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
421 * @ivsize: initialization vector size
422 * @icvsize: integrity check value (ICV) size (truncated or full)
423 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
424 * @nonce: pointer to rfc3686 nonce
425 * @ctx1_iv_off: IV offset in CONTEXT1 register
426 *
427 * Note: Requires an MDHA split key.
428 */
429void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
430 struct alginfo *adata, unsigned int ivsize,
431 unsigned int icvsize, const bool is_rfc3686,
432 u32 *nonce, const u32 ctx1_iv_off)
433{
434 u32 geniv, moveiv;
435
436 /* Note: Context registers are saved. */
437 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
438
439 if (is_rfc3686)
440 goto copy_iv;
441
442 /* Generate IV */
443 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
444 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
445 NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
446 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
447 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
448 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
449 append_move(desc, MOVE_WAITCOMP |
450 MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
451 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
452 (ivsize << MOVE_LEN_SHIFT));
453 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
454
455copy_iv:
456 /* Copy IV to class 1 context */
457 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
458 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
459 (ivsize << MOVE_LEN_SHIFT));
460
461 /* Return to encryption */
462 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
463 OP_ALG_ENCRYPT);
464
465 /* Read and write assoclen bytes */
466 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
467 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
468
469 /* Skip assoc data */
470 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
471
472 /* read assoc before reading payload */
473 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
474 KEY_VLF);
475
476 /* Copy iv from outfifo to class 2 fifo */
477 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
478 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
479 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
480 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
481 append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
482 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
483
484 /* Load Counter into CONTEXT1 reg */
485 if (is_rfc3686)
486 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
487 LDST_SRCDST_BYTE_CONTEXT |
488 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
489 LDST_OFFSET_SHIFT));
490
491 /* Class 1 operation */
492 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
493 OP_ALG_ENCRYPT);
494
495 /* Will write ivsize + cryptlen */
496 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
497
 498	/* No need to reload iv */
499 append_seq_fifo_load(desc, ivsize,
500 FIFOLD_CLASS_SKIP);
501
502 /* Will read cryptlen */
503 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
504 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
505 FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
506 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
507
508 /* Write ICV */
509 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
510 LDST_SRCDST_BYTE_CONTEXT);
511
512#ifdef DEBUG
513 print_hex_dump(KERN_ERR,
514 "aead givenc shdesc@" __stringify(__LINE__)": ",
515 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
516#endif
517}
518EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
519
520/**
521 * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
522 * @desc: pointer to buffer used for descriptor construction
523 * @cdata: pointer to block cipher transform definitions
524 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
525 * @icvsize: integrity check value (ICV) size (truncated or full)
526 */
527void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
528 unsigned int icvsize)
529{
530 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
531 *zero_assoc_jump_cmd2;
532
533 init_sh_desc(desc, HDR_SHARE_SERIAL);
534
535 /* skip key loading if they are loaded due to sharing */
536 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
537 JUMP_COND_SHRD | JUMP_COND_SELF);
538 if (cdata->key_inline)
539 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
540 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
541 else
542 append_key(desc, cdata->key, cdata->keylen, CLASS_1 |
543 KEY_DEST_CLASS_REG);
544 set_jump_tgt_here(desc, key_jump_cmd);
545
546 /* class 1 operation */
547 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
548 OP_ALG_ENCRYPT);
549
550 /* if assoclen + cryptlen is ZERO, skip to ICV write */
551 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
552 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
553 JUMP_COND_MATH_Z);
554
555 /* if assoclen is ZERO, skip reading the assoc data */
556 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
557 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
558 JUMP_COND_MATH_Z);
559
560 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
561
562 /* skip assoc data */
563 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
564
565 /* cryptlen = seqinlen - assoclen */
566 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
567
568 /* if cryptlen is ZERO jump to zero-payload commands */
569 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
570 JUMP_COND_MATH_Z);
571
572 /* read assoc data */
573 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
574 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
575 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
576
577 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
578
579 /* write encrypted data */
580 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
581
582 /* read payload data */
583 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
584 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
585
586 /* jump the zero-payload commands */
587 append_jump(desc, JUMP_TEST_ALL | 2);
588
589 /* zero-payload commands */
590 set_jump_tgt_here(desc, zero_payload_jump_cmd);
591
592 /* read assoc data */
593 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
594 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
595
596 /* There is no input data */
597 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
598
599 /* write ICV */
600 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
601 LDST_SRCDST_BYTE_CONTEXT);
602
603#ifdef DEBUG
604 print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
605 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
606#endif
607}
608EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
609
610/**
611 * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
612 * @desc: pointer to buffer used for descriptor construction
613 * @cdata: pointer to block cipher transform definitions
614 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
615 * @icvsize: integrity check value (ICV) size (truncated or full)
616 */
617void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
618 unsigned int icvsize)
619{
620 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
621
622 init_sh_desc(desc, HDR_SHARE_SERIAL);
623
624 /* skip key loading if they are loaded due to sharing */
625 key_jump_cmd = append_jump(desc, JUMP_JSL |
626 JUMP_TEST_ALL | JUMP_COND_SHRD |
627 JUMP_COND_SELF);
628 if (cdata->key_inline)
629 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
630 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
631 else
632 append_key(desc, cdata->key, cdata->keylen, CLASS_1 |
633 KEY_DEST_CLASS_REG);
634 set_jump_tgt_here(desc, key_jump_cmd);
635
636 /* class 1 operation */
637 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
638 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
639
640 /* if assoclen is ZERO, skip reading the assoc data */
641 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
642 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
643 JUMP_COND_MATH_Z);
644
645 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
646
647 /* skip assoc data */
648 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
649
650 /* read assoc data */
651 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
652 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
653
654 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
655
656 /* cryptlen = seqoutlen - assoclen */
657 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
658
659 /* jump to zero-payload command if cryptlen is zero */
660 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
661 JUMP_COND_MATH_Z);
662
663 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
664
665 /* store encrypted data */
666 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
667
668 /* read payload data */
669 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
670 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
671
672 /* zero-payload command */
673 set_jump_tgt_here(desc, zero_payload_jump_cmd);
674
675 /* read ICV */
676 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
677 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
678
679#ifdef DEBUG
680 print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
681 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
682#endif
683}
684EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
685
686/**
687 * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
688 * (non-protocol).
689 * @desc: pointer to buffer used for descriptor construction
690 * @cdata: pointer to block cipher transform definitions
691 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
692 * @icvsize: integrity check value (ICV) size (truncated or full)
693 */
694void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
695 unsigned int icvsize)
696{
697 u32 *key_jump_cmd;
698
699 init_sh_desc(desc, HDR_SHARE_SERIAL);
700
701 /* Skip key loading if it is loaded due to sharing */
702 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
703 JUMP_COND_SHRD);
704 if (cdata->key_inline)
705 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
706 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
707 else
708 append_key(desc, cdata->key, cdata->keylen, CLASS_1 |
709 KEY_DEST_CLASS_REG);
710 set_jump_tgt_here(desc, key_jump_cmd);
711
712 /* Class 1 operation */
713 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
714 OP_ALG_ENCRYPT);
715
716 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
717 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
718
719 /* Read assoc data */
720 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
721 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
722
723 /* Skip IV */
724 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
725
726 /* Will read cryptlen bytes */
727 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
728
729 /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
730 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
731
732 /* Skip assoc data */
733 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
734
735 /* cryptlen = seqoutlen - assoclen */
736 append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
737
738 /* Write encrypted data */
739 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
740
741 /* Read payload data */
742 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
743 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
744
745 /* Write ICV */
746 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
747 LDST_SRCDST_BYTE_CONTEXT);
748
749#ifdef DEBUG
750 print_hex_dump(KERN_ERR,
751 "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
752 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
753#endif
754}
755EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
756
757/**
758 * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
759 * (non-protocol).
760 * @desc: pointer to buffer used for descriptor construction
761 * @cdata: pointer to block cipher transform definitions
762 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
763 * @icvsize: integrity check value (ICV) size (truncated or full)
764 */
765void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
766 unsigned int icvsize)
767{
768 u32 *key_jump_cmd;
769
770 init_sh_desc(desc, HDR_SHARE_SERIAL);
771
772 /* Skip key loading if it is loaded due to sharing */
773 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
774 JUMP_COND_SHRD);
775 if (cdata->key_inline)
776 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
777 cdata->keylen, CLASS_1 |
778 KEY_DEST_CLASS_REG);
779 else
780 append_key(desc, cdata->key, cdata->keylen, CLASS_1 |
781 KEY_DEST_CLASS_REG);
782 set_jump_tgt_here(desc, key_jump_cmd);
783
784 /* Class 1 operation */
785 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
786 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
787
788 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
789 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
790
791 /* Read assoc data */
792 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
793 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
794
795 /* Skip IV */
796 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
797
798 /* Will read cryptlen bytes */
799 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
800
801 /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
802 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
803
804 /* Skip assoc data */
805 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
806
807 /* Will write cryptlen bytes */
808 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
809
810 /* Store payload data */
811 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
812
813 /* Read encrypted data */
814 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
815 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
816
817 /* Read ICV */
818 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
819 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
820
821#ifdef DEBUG
822 print_hex_dump(KERN_ERR,
823 "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
824 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
825#endif
826}
827EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
828
829/**
830 * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
831 * (non-protocol).
832 * @desc: pointer to buffer used for descriptor construction
833 * @cdata: pointer to block cipher transform definitions
834 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
835 * @icvsize: integrity check value (ICV) size (truncated or full)
836 */
void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
			       unsigned int icvsize)
{
	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (cdata->key_inline)
		/* Key is small enough to be embedded as immediate data */
		append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		/* Otherwise the key is referenced by pointer */
		append_key(desc, cdata->key, cdata->keylen, CLASS_1 |
			   KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 * NOTE(review): the two MOVEs below get their targets patched by
	 * set_move_tgt_here() further down - presumably so the run-time
	 * length computed in MATH3 ends up in the descriptor; confirm
	 * against the SEC reference manual.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes; all input is AAD (GMAC) */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
898
/**
 * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
 * (non-protocol).
 * @desc: pointer to buffer used for descriptor construction
 * @cdata: pointer to block cipher transform definitions
 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
 * @icvsize: integrity check value (ICV) size (truncated or full)
 */
void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
			       unsigned int icvsize)
{
	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (cdata->key_inline)
		/* Key is small enough to be embedded as immediate data */
		append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		/* Otherwise the key is referenced by pointer */
		append_key(desc, cdata->key, cdata->keylen, CLASS_1 |
			   KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation - decrypt with ICV check enabled */
	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 * NOTE(review): targets of these MOVEs are patched below via
	 * set_move_tgt_here(); confirm semantics against the SEC manual.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
973
/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	/* Both variable sequence lengths = remaining SEQ IN length */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read message into class 1, then store it to the output sequence */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
986
987/**
988 * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
989 * @desc: pointer to buffer used for descriptor construction
990 * @cdata: pointer to block cipher transform definitions
991 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
992 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
993 * @ivsize: initialization vector size
994 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
995 * @ctx1_iv_off: IV offset in CONTEXT1 register
996 */
997void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
998 unsigned int ivsize, const bool is_rfc3686,
999 const u32 ctx1_iv_off)
1000{
1001 u32 *key_jump_cmd;
1002
1003 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1004 /* Skip if already shared */
1005 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1006 JUMP_COND_SHRD);
1007
1008 /* Load class1 key only */
1009 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
1010 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1011
1012 /* Load nonce into CONTEXT1 reg */
1013 if (is_rfc3686) {
1014 u8 *nonce = (u8 *)cdata->key + cdata->keylen;
1015
1016 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1017 LDST_CLASS_IND_CCB |
1018 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1019 append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
1020 MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
1021 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1022 }
1023
1024 set_jump_tgt_here(desc, key_jump_cmd);
1025
1026 /* Load iv */
1027 append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
1028 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1029
1030 /* Load counter into CONTEXT1 reg */
1031 if (is_rfc3686)
1032 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1033 LDST_SRCDST_BYTE_CONTEXT |
1034 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1035 LDST_OFFSET_SHIFT));
1036
1037 /* Load operation */
1038 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
1039 OP_ALG_ENCRYPT);
1040
1041 /* Perform operation */
1042 ablkcipher_append_src_dst(desc);
1043
1044#ifdef DEBUG
1045 print_hex_dump(KERN_ERR,
1046 "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
1047 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1048#endif
1049}
1050EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
1051
1052/**
1053 * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
1054 * @desc: pointer to buffer used for descriptor construction
1055 * @cdata: pointer to block cipher transform definitions
1056 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
1057 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
1058 * @ivsize: initialization vector size
1059 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
1060 * @ctx1_iv_off: IV offset in CONTEXT1 register
1061 */
1062void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
1063 unsigned int ivsize, const bool is_rfc3686,
1064 const u32 ctx1_iv_off)
1065{
1066 u32 *key_jump_cmd;
1067
1068 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1069 /* Skip if already shared */
1070 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1071 JUMP_COND_SHRD);
1072
1073 /* Load class1 key only */
1074 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
1075 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1076
1077 /* Load nonce into CONTEXT1 reg */
1078 if (is_rfc3686) {
1079 u8 *nonce = (u8 *)cdata->key + cdata->keylen;
1080
1081 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1082 LDST_CLASS_IND_CCB |
1083 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1084 append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
1085 MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
1086 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1087 }
1088
1089 set_jump_tgt_here(desc, key_jump_cmd);
1090
1091 /* load IV */
1092 append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
1093 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1094
1095 /* Load counter into CONTEXT1 reg */
1096 if (is_rfc3686)
1097 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1098 LDST_SRCDST_BYTE_CONTEXT |
1099 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1100 LDST_OFFSET_SHIFT));
1101
1102 /* Choose operation */
1103 if (ctx1_iv_off)
1104 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
1105 OP_ALG_DECRYPT);
1106 else
1107 append_dec_op1(desc, cdata->algtype);
1108
1109 /* Perform operation */
1110 ablkcipher_append_src_dst(desc);
1111
1112#ifdef DEBUG
1113 print_hex_dump(KERN_ERR,
1114 "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
1115 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1116#endif
1117}
1118EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
1119
1120/**
1121 * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
1122 * with HW-generated initialization vector.
1123 * @desc: pointer to buffer used for descriptor construction
1124 * @cdata: pointer to block cipher transform definitions
1125 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
1126 * with OP_ALG_AAI_CBC.
1127 * @ivsize: initialization vector size
1128 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
1129 * @ctx1_iv_off: IV offset in CONTEXT1 register
1130 */
1131void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
1132 unsigned int ivsize, const bool is_rfc3686,
1133 const u32 ctx1_iv_off)
1134{
1135 u32 *key_jump_cmd, geniv;
1136
1137 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1138 /* Skip if already shared */
1139 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1140 JUMP_COND_SHRD);
1141
1142 /* Load class1 key only */
1143 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
1144 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1145
1146 /* Load Nonce into CONTEXT1 reg */
1147 if (is_rfc3686) {
1148 u8 *nonce = (u8 *)cdata->key + cdata->keylen;
1149
1150 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1151 LDST_CLASS_IND_CCB |
1152 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1153 append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
1154 MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
1155 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1156 }
1157 set_jump_tgt_here(desc, key_jump_cmd);
1158
1159 /* Generate IV */
1160 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1161 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
1162 (ivsize << NFIFOENTRY_DLEN_SHIFT);
1163 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1164 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1165 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1166 append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
1167 MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
1168 (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1169 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1170
1171 /* Copy generated IV to memory */
1172 append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
1173 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1174
1175 /* Load Counter into CONTEXT1 reg */
1176 if (is_rfc3686)
1177 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1178 LDST_SRCDST_BYTE_CONTEXT |
1179 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1180 LDST_OFFSET_SHIFT));
1181
1182 if (ctx1_iv_off)
1183 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1184 (1 << JUMP_OFFSET_SHIFT));
1185
1186 /* Load operation */
1187 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
1188 OP_ALG_ENCRYPT);
1189
1190 /* Perform operation */
1191 ablkcipher_append_src_dst(desc);
1192
1193#ifdef DEBUG
1194 print_hex_dump(KERN_ERR,
1195 "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1196 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1197#endif
1198}
1199EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
1200
1201/**
1202 * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
1203 * descriptor
1204 * @desc: pointer to buffer used for descriptor construction
1205 * @cdata: pointer to block cipher transform definitions
1206 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
1207 */
1208void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
1209{
1210 __be64 sector_size = cpu_to_be64(512);
1211 u32 *key_jump_cmd;
1212
1213 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1214 /* Skip if already shared */
1215 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1216 JUMP_COND_SHRD);
1217
1218 /* Load class1 keys only */
1219 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
1220 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1221
1222 /* Load sector size with index 40 bytes (0x28) */
1223 append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
1224 LDST_SRCDST_BYTE_CONTEXT |
1225 (0x28 << LDST_OFFSET_SHIFT));
1226
1227 set_jump_tgt_here(desc, key_jump_cmd);
1228
1229 /*
1230 * create sequence for loading the sector index
1231 * Upper 8B of IV - will be used as sector index
1232 * Lower 8B of IV - will be discarded
1233 */
1234 append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1235 (0x20 << LDST_OFFSET_SHIFT));
1236 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1237
1238 /* Load operation */
1239 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
1240 OP_ALG_ENCRYPT);
1241
1242 /* Perform operation */
1243 ablkcipher_append_src_dst(desc);
1244
1245#ifdef DEBUG
1246 print_hex_dump(KERN_ERR,
1247 "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1248 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1249#endif
1250}
1251EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
1252
1253/**
1254 * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
1255 * descriptor
1256 * @desc: pointer to buffer used for descriptor construction
1257 * @cdata: pointer to block cipher transform definitions
1258 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
1259 */
1260void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
1261{
1262 __be64 sector_size = cpu_to_be64(512);
1263 u32 *key_jump_cmd;
1264
1265 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1266 /* Skip if already shared */
1267 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1268 JUMP_COND_SHRD);
1269
1270 /* Load class1 key only */
1271 append_key_as_imm(desc, (void *)cdata->key, cdata->keylen,
1272 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1273
1274 /* Load sector size with index 40 bytes (0x28) */
1275 append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
1276 LDST_SRCDST_BYTE_CONTEXT |
1277 (0x28 << LDST_OFFSET_SHIFT));
1278
1279 set_jump_tgt_here(desc, key_jump_cmd);
1280
1281 /*
1282 * create sequence for loading the sector index
1283 * Upper 8B of IV - will be used as sector index
1284 * Lower 8B of IV - will be discarded
1285 */
1286 append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1287 (0x20 << LDST_OFFSET_SHIFT));
1288 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1289
1290 /* Load operation */
1291 append_dec_op1(desc, cdata->algtype);
1292
1293 /* Perform operation */
1294 ablkcipher_append_src_dst(desc);
1295
1296#ifdef DEBUG
1297 print_hex_dump(KERN_ERR,
1298 "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1299 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1300#endif
1301}
1302EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 000000000000..95551737333a
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,97 @@
/*
 * Shared descriptors for aead, ablkcipher algorithms
 *
 * Copyright 2016 NXP
 */

#ifndef _CAAMALG_DESC_H_
#define _CAAMALG_DESC_H_

/*
 * length of descriptors text
 *
 * NOTE(review): these sizes appear to mirror the number of commands the
 * matching cnstr_shdsc_* constructor appends (callers use them to size
 * buffers / decide whether keys can be inlined) - keep them in sync with
 * caamalg_desc.c when a constructor gains or loses commands.
 */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

/* Note: Nonce is counted in cdata.keylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

/*
 * Shared descriptor constructors. Each writes a complete shared
 * descriptor into @desc; full kernel-doc lives with the definitions
 * in caamalg_desc.c.
 */
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
				 unsigned int icvsize);

void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
				 unsigned int icvsize);

void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
			    struct alginfo *adata, unsigned int icvsize,
			    const bool is_rfc3686, u32 *nonce,
			    const u32 ctx1_iv_off);

void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
			    struct alginfo *adata, unsigned int ivsize,
			    unsigned int icvsize, const bool geniv,
			    const bool is_rfc3686, u32 *nonce,
			    const u32 ctx1_iv_off);

void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
			       struct alginfo *adata, unsigned int ivsize,
			       unsigned int icvsize, const bool is_rfc3686,
			       u32 *nonce, const u32 ctx1_iv_off);

void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
			   unsigned int icvsize);

void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
			   unsigned int icvsize);

void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
			       unsigned int icvsize);

void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
			       unsigned int icvsize);

void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
			       unsigned int icvsize);

void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
			       unsigned int icvsize);

void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
				  unsigned int ivsize, const bool is_rfc3686,
				  const u32 ctx1_iv_off);

void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
				  unsigned int ivsize, const bool is_rfc3686,
				  const u32 ctx1_iv_off);

void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
				     unsigned int ivsize, const bool is_rfc3686,
				     const u32 ctx1_iv_off);

void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);

void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);

#endif /* _CAAMALG_DESC_H_ */