author     Yuan Kang <Yuan.Kang@freescale.com>        2011-07-14 23:21:42 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>   2011-07-14 23:21:42 -0400
commit     1acebad3d8db8d5220b3010c2eb160c625434cf2 (patch)
tree       29f21d77d77e06b5f0cc6bb8fa05c848dfd3676f /drivers/crypto
parent     885e9e2fd3f009de56dd265f4ecd8740e9ad5aaa (diff)
crypto: caam - faster aead implementation
Job descriptors only contain header and seq pointers.
Other commands are stored in separate shared descriptors
for encrypt, decrypt and givencrypt, stored as arrays
in caam_ctx.
This requires additional macros to create math commands
to calculate assoclen and cryptlen.
Signed-off-by: Yuan Kang <Yuan.Kang@freescale.com>
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
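
The "math commands" the message refers to compute request lengths at descriptor-execution time: a shared descriptor is built once per key, so it can no longer hard-code per-request byte counts. Below is a minimal sketch of the pattern; the wrapper function name is hypothetical, but the three append_math_* calls appear as-is in aead_set_sh_desc() in the diff that follows.

/*
 * Sketch only: run-time length math from the encrypt shared
 * descriptor (the calls are lifted from aead_set_sh_desc() below;
 * the wrapper itself is invented for illustration).
 */
static void sketch_enc_len_math(u32 *desc, struct caam_ctx *ctx,
                                struct aead_tfm *tfm)
{
        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
        /* assoclen + cryptlen = seqinlen - ivsize */
        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
}

The results land in the variable sequence-length registers, which lets the FIFO load/store commands that follow run as VLF (variable-length) transfers instead of using fixed byte counts.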
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/caam/caamalg.c     | 1104
-rw-r--r--  drivers/crypto/caam/desc_constr.h |   58
2 files changed, 832 insertions(+), 330 deletions(-)
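
To see where the speedup comes from, note what is left in the per-request job descriptor: DESC_JOB_IO_LEN in the diff budgets exactly three commands plus three pointer words. A hedged sketch of that shape, using the same helpers init_aead_job() uses below (the function name here is invented for illustration; lengths and option flags are simplified):

/*
 * Sketch only: per-request job descriptor that just references a
 * prebuilt shared descriptor. See init_aead_job() in the diff for
 * the real construction, including scatter/gather handling.
 */
static void sketch_aead_job(u32 *desc, dma_addr_t sh_desc_dma, int sh_len,
                            dma_addr_t src_dma, u32 in_len,
                            dma_addr_t dst_dma, u32 out_len)
{
        /* header referencing the shared descriptor */
        init_job_desc_shared(desc, sh_desc_dma, sh_len,
                             HDR_SHARE_DEFER | HDR_REVERSE);
        /* input sequence: assoc data, IV and payload */
        append_seq_in_ptr(desc, src_dma, in_len, 0);
        /* output sequence: payload (plus ICV when encrypting) */
        append_seq_out_ptr(desc, dst_dma, out_len, 0);
}

Key loading, the class 1/class 2 operations, IV handling and the length math all execute from the cached shared descriptor, so per-request construction cost drops sharply compared to the old build-everything-per-job approach.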
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 403b293509ba..ed7d59d168af 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -62,10 +62,16 @@
 #define CAAM_MAX_IV_LENGTH 16
 
 /* length of descriptors text */
-#define DESC_AEAD_SHARED_TEXT_LEN 4
-#define DESC_AEAD_ENCRYPT_TEXT_LEN 21
-#define DESC_AEAD_DECRYPT_TEXT_LEN 24
-#define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
+
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
+                             CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
 #ifdef DEBUG
 /* for print_hex_dumps with line references */
@@ -76,17 +82,77 @@
 #define debug(format, arg...)
 #endif
 
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+        u32 *jump_cmd, *uncond_jump_cmd;
+
+        jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+        append_operation(desc, type | OP_ALG_AS_INITFINAL |
+                         OP_ALG_DECRYPT);
+        uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+        set_jump_tgt_here(desc, jump_cmd);
+        append_operation(desc, type | OP_ALG_AS_INITFINAL |
+                         OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+        set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/*
+ * Wait for completion of class 1 key loading before allowing
+ * error propagation
+ */
+static inline void append_dec_shr_done(u32 *desc)
+{
+        u32 *jump_cmd;
+
+        jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
+        set_jump_tgt_here(desc, jump_cmd);
+        append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+                             KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+{
+        append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+                   LDST_CLASS_1_CCB | ivsize);
+        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+}
+
+/*
+ * If all data, including src (with assoc and iv) or dst (with iv only) are
+ * contiguous
+ */
+#define GIV_SRC_CONTIG 1
+#define GIV_DST_CONTIG (1 << 1)
+
 /*
  * per-session context
  */
 struct caam_ctx {
         struct device *jrdev;
-        u32 *sh_desc;
-        dma_addr_t shared_desc_phys;
+        u32 sh_desc_enc[DESC_MAX_USED_LEN];
+        u32 sh_desc_dec[DESC_MAX_USED_LEN];
+        u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+        dma_addr_t sh_desc_enc_dma;
+        dma_addr_t sh_desc_dec_dma;
+        dma_addr_t sh_desc_givenc_dma;
         u32 class1_alg_type;
         u32 class2_alg_type;
         u32 alg_op;
-        u8 *key;
+        u8 key[CAAM_MAX_KEY_SIZE];
         dma_addr_t key_dma;
         unsigned int enckeylen;
         unsigned int split_key_len;
@@ -94,12 +160,275 @@ struct caam_ctx {
         unsigned int authsize;
 };
 
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+                            int keys_fit_inline)
+{
+        if (keys_fit_inline) {
+                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+                                  ctx->split_key_len, CLASS_2 |
+                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+                append_key_as_imm(desc, (void *)ctx->key +
+                                  ctx->split_key_pad_len, ctx->enckeylen,
+                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+        } else {
+                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
+                append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+                           ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+        }
+}
+
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+                                  int keys_fit_inline)
+{
+        u32 *key_jump_cmd;
+
+        init_sh_desc(desc, HDR_SHARE_WAIT);
+
+        /* Skip if already shared */
+        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+                                   JUMP_COND_SHRD);
+
+        append_key_aead(desc, ctx, keys_fit_inline);
+
+        set_jump_tgt_here(desc, key_jump_cmd);
+
+        /* Propagate errors from shared to job descriptor */
+        append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+        struct aead_tfm *tfm = &aead->base.crt_aead;
+        struct caam_ctx *ctx = crypto_aead_ctx(aead);
+        struct device *jrdev = ctx->jrdev;
+        bool keys_fit_inline = 0;
+        u32 *key_jump_cmd, *jump_cmd;
+        u32 geniv, moveiv;
+        u32 *desc;
+
+        if (!ctx->enckeylen || !ctx->authsize)
+                return 0;
+
+        /*
+         * Job Descriptor and Shared Descriptors
+         * must all fit into the 64-word Descriptor h/w Buffer
+         */
+        if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+            ctx->split_key_pad_len + ctx->enckeylen <=
+            CAAM_DESC_BYTES_MAX)
+                keys_fit_inline = 1;
+
+        /* aead_encrypt shared descriptor */
+        desc = ctx->sh_desc_enc;
+
+        init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+        /* Class 2 operation */
+        append_operation(desc, ctx->class2_alg_type |
+                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+        /* cryptlen = seqoutlen - authsize */
+        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+        /* assoclen + cryptlen = seqinlen - ivsize */
+        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+        /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+        append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+        /* read assoc before reading payload */
+        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+                             KEY_VLF);
+        aead_append_ld_iv(desc, tfm->ivsize);
+
+        /* Class 1 operation */
+        append_operation(desc, ctx->class1_alg_type |
+                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+        /* Read and write cryptlen bytes */
+        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+        /* Write ICV */
+        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+                         LDST_SRCDST_BYTE_CONTEXT);
+
+        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+                                              desc_bytes(desc),
+                                              DMA_TO_DEVICE);
+        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+                dev_err(jrdev, "unable to map shared descriptor\n");
+                return -ENOMEM;
+        }
+#ifdef DEBUG
+        print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+                       desc_bytes(desc), 1);
+#endif
+
+        /*
+         * Job Descriptor and Shared Descriptors
+         * must all fit into the 64-word Descriptor h/w Buffer
+         */
+        if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+            ctx->split_key_pad_len + ctx->enckeylen <=
+            CAAM_DESC_BYTES_MAX)
+                keys_fit_inline = 1;
+
+        desc = ctx->sh_desc_dec;
+
+        /* aead_decrypt shared descriptor */
+        init_sh_desc(desc, HDR_SHARE_WAIT);
+
+        /* Skip if already shared */
+        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+                                   JUMP_COND_SHRD);
+
+        append_key_aead(desc, ctx, keys_fit_inline);
+
+        /* Only propagate error immediately if shared */
+        jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+        set_jump_tgt_here(desc, key_jump_cmd);
+        append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+        set_jump_tgt_here(desc, jump_cmd);
+
+        /* Class 2 operation */
+        append_operation(desc, ctx->class2_alg_type |
+                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+        /* assoclen + cryptlen = seqinlen - ivsize */
+        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+                                ctx->authsize + tfm->ivsize);
+        /* assoclen = (assoclen + cryptlen) - cryptlen */
+        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+        append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+        /* read assoc before reading payload */
+        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+                             KEY_VLF);
+
+        aead_append_ld_iv(desc, tfm->ivsize);
+
+        append_dec_op1(desc, ctx->class1_alg_type);
+
+        /* Read and write cryptlen bytes */
+        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+        aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+        /* Load ICV */
+        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+        append_dec_shr_done(desc);
+
+        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+                                              desc_bytes(desc),
+                                              DMA_TO_DEVICE);
+        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+                dev_err(jrdev, "unable to map shared descriptor\n");
+                return -ENOMEM;
+        }
+#ifdef DEBUG
+        print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+                       desc_bytes(desc), 1);
+#endif
+
+        /*
+         * Job Descriptor and Shared Descriptors
+         * must all fit into the 64-word Descriptor h/w Buffer
+         */
+        if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+            ctx->split_key_pad_len + ctx->enckeylen <=
+            CAAM_DESC_BYTES_MAX)
+                keys_fit_inline = 1;
+
+        /* aead_givencrypt shared descriptor */
+        desc = ctx->sh_desc_givenc;
+
+        init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+        /* Generate IV */
+        geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+                NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+                NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+        append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+        append_move(desc, MOVE_SRC_INFIFO |
+                    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+        /* Copy IV to class 1 context */
+        append_move(desc, MOVE_SRC_CLASS1CTX |
+                    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+
+        /* Return to encryption */
+        append_operation(desc, ctx->class2_alg_type |
+                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+        /* ivsize + cryptlen = seqoutlen - authsize */
+        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+        /* assoclen = seqinlen - (ivsize + cryptlen) */
+        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+        /* read assoc before reading payload */
+        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+                             KEY_VLF);
+
+        /* Copy iv from class 1 ctx to class 2 fifo*/
+        moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+                 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+        append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+        append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+                            LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+        /* Class 1 operation */
+        append_operation(desc, ctx->class1_alg_type |
+                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+        /* Will write ivsize + cryptlen */
+        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+        /* Not need to reload iv */
+        append_seq_fifo_load(desc, tfm->ivsize,
+                             FIFOLD_CLASS_SKIP);
+
+        /* Will read cryptlen */
+        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+        /* Write ICV */
+        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+                         LDST_SRCDST_BYTE_CONTEXT);
+
+        ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+                                                 desc_bytes(desc),
+                                                 DMA_TO_DEVICE);
+        if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+                dev_err(jrdev, "unable to map shared descriptor\n");
+                return -ENOMEM;
+        }
+#ifdef DEBUG
+        print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+                       desc_bytes(desc), 1);
+#endif
+
+        return 0;
+}
+
 static int aead_setauthsize(struct crypto_aead *authenc,
                             unsigned int authsize)
 {
         struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 
         ctx->authsize = authsize;
+        aead_set_sh_desc(authenc);
 
         return 0;
 }
@@ -117,6 +446,7 @@ static void split_key_done(struct device *dev, u32 *desc, u32 err,
 #ifdef DEBUG
         dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
+
         if (err) {
                 char tmp[CAAM_ERROR_STR_MAX];
 
@@ -220,72 +550,6 @@ static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
         return ret;
 }
 
-static int build_sh_desc_ipsec(struct caam_ctx *ctx)
-{
-        struct device *jrdev = ctx->jrdev;
-        u32 *sh_desc;
-        u32 *jump_cmd;
-        bool keys_fit_inline = 0;
-
-        /*
-         * largest Job Descriptor and its Shared Descriptor
-         * must both fit into the 64-word Descriptor h/w Buffer
-         */
-        if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
-             DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
-            ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-                keys_fit_inline = 1;
-
-        /* build shared descriptor for this session */
-        sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
-                          (keys_fit_inline ?
-                           ctx->split_key_pad_len + ctx->enckeylen :
-                           CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
-        if (!sh_desc) {
-                dev_err(jrdev, "could not allocate shared descriptor\n");
-                return -ENOMEM;
-        }
-
-        init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
-
-        jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
-                               JUMP_COND_SHRD | JUMP_COND_SELF);
-
-        /*
-         * process keys, starting with class 2/authentication.
-         */
-        if (keys_fit_inline) {
-                append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
-                                  ctx->split_key_len,
-                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-
-                append_key_as_imm(sh_desc, (void *)ctx->key +
-                                  ctx->split_key_pad_len, ctx->enckeylen,
-                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-        } else {
-                append_key(sh_desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
-                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
-                append_key(sh_desc, ctx->key_dma + ctx->split_key_pad_len,
-                           ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-        }
-
-        /* update jump cmd now that we are at the jump target */
-        set_jump_tgt_here(sh_desc, jump_cmd);
-
-        ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
-                                               desc_bytes(sh_desc),
-                                               DMA_TO_DEVICE);
-        if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
-                dev_err(jrdev, "unable to map shared descriptor\n");
-                kfree(sh_desc);
-                return -ENOMEM;
-        }
-
-        ctx->sh_desc = sh_desc;
-
-        return 0;
-}
-
 static int aead_setkey(struct crypto_aead *aead,
                        const u8 *key, unsigned int keylen)
 {
@@ -326,16 +590,9 @@ static int aead_setkey(struct crypto_aead *aead,
         print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
-        ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
-                           GFP_KERNEL | GFP_DMA);
-        if (!ctx->key) {
-                dev_err(jrdev, "could not allocate key output memory\n");
-                return -ENOMEM;
-        }
 
         ret = gen_split_key(ctx, key, authkeylen);
         if (ret) {
-                kfree(ctx->key);
                 goto badkey;
         }
 
@@ -346,7 +603,6 @@ static int aead_setkey(struct crypto_aead *aead,
                          enckeylen, DMA_TO_DEVICE);
         if (dma_mapping_error(jrdev, ctx->key_dma)) {
                 dev_err(jrdev, "unable to map key i/o memory\n");
-                kfree(ctx->key);
                 return -ENOMEM;
         }
 #ifdef DEBUG
@@ -357,11 +613,10 @@ static int aead_setkey(struct crypto_aead *aead,
 
         ctx->enckeylen = enckeylen;
 
-        ret = build_sh_desc_ipsec(ctx);
+        ret = aead_set_sh_desc(aead);
         if (ret) {
                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
                                  enckeylen, DMA_TO_DEVICE);
-                kfree(ctx->key);
         }
 
         return ret;
@@ -379,10 +634,11 @@ struct link_tbl_entry {
 };
 
 /*
- * aead_edesc - s/w-extended ipsec_esp descriptor
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @link_tbl_bytes: length of dma mapped link_tbl space
  * @link_tbl_dma: bus physical mapped address of h/w link table
@@ -392,37 +648,47 @@ struct aead_edesc {
         int assoc_nents;
         int src_nents;
         int dst_nents;
+        dma_addr_t iv_dma;
         int link_tbl_bytes;
         dma_addr_t link_tbl_dma;
         struct link_tbl_entry *link_tbl;
         u32 hw_desc[0];
 };
 
-static void aead_unmap(struct device *dev,
-                       struct aead_edesc *edesc,
-                       struct aead_request *req)
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+                       struct scatterlist *dst, int src_nents, int dst_nents,
+                       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
+                       int link_tbl_bytes)
 {
-        dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
-
-        if (unlikely(req->dst != req->src)) {
-                dma_unmap_sg(dev, req->src, edesc->src_nents,
-                             DMA_TO_DEVICE);
-                dma_unmap_sg(dev, req->dst, edesc->dst_nents,
-                             DMA_FROM_DEVICE);
+        if (unlikely(dst != src)) {
+                dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+                dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
         } else {
-                dma_unmap_sg(dev, req->src, edesc->src_nents,
-                             DMA_BIDIRECTIONAL);
+                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
         }
 
-        if (edesc->link_tbl_bytes)
-                dma_unmap_single(dev, edesc->link_tbl_dma,
-                                 edesc->link_tbl_bytes,
+        if (iv_dma)
+                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+        if (link_tbl_bytes)
+                dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
                                  DMA_TO_DEVICE);
 }
 
-/*
- * ipsec_esp descriptor callbacks
- */
+static void aead_unmap(struct device *dev,
+                       struct aead_edesc *edesc,
+                       struct aead_request *req)
+{
+        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+        int ivsize = crypto_aead_ivsize(aead);
+
+        dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+
+        caam_unmap(dev, req->src, req->dst,
+                   edesc->src_nents, edesc->dst_nents,
+                   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+                   edesc->link_tbl_bytes);
+}
+
 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
 {
@@ -430,11 +696,12 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
         struct aead_edesc *edesc;
 #ifdef DEBUG
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
-        int ivsize = crypto_aead_ivsize(aead);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
+        int ivsize = crypto_aead_ivsize(aead);
 
         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
+
         edesc = (struct aead_edesc *)((char *)desc -
                                       offsetof(struct aead_edesc, hw_desc));
 
@@ -472,12 +739,23 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 #ifdef DEBUG
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
+        int ivsize = crypto_aead_ivsize(aead);
 
         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
+
         edesc = (struct aead_edesc *)((char *)desc -
                                       offsetof(struct aead_edesc, hw_desc));
 
+#ifdef DEBUG
+        print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+                       ivsize, 1);
+        print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+                       req->cryptlen, 1);
+#endif
+
         if (err) {
                 char tmp[CAAM_ERROR_STR_MAX];
 
@@ -506,241 +784,271 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                                sg->length + ctx->authsize + 16, 1);
         }
 #endif
+
         kfree(edesc);
 
         aead_request_complete(req, err);
 }
 
+static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
+                               dma_addr_t dma, u32 len, u32 offset)
+{
+        link_tbl_ptr->ptr = dma;
+        link_tbl_ptr->len = len;
+        link_tbl_ptr->reserved = 0;
+        link_tbl_ptr->buf_pool_id = 0;
+        link_tbl_ptr->offset = offset;
+#ifdef DEBUG
+        print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
+                       sizeof(struct link_tbl_entry), 1);
+#endif
+}
+
 /*
  * convert scatterlist to h/w link table format
- * scatterlist must have been previously dma mapped
+ * but does not have final bit; instead, returns last entry
  */
-static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
-                           struct link_tbl_entry *link_tbl_ptr, u32 offset)
+static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
+                                             int sg_count, struct link_tbl_entry
+                                             *link_tbl_ptr, u32 offset)
 {
         while (sg_count) {
-                link_tbl_ptr->ptr = sg_dma_address(sg);
-                link_tbl_ptr->len = sg_dma_len(sg);
-                link_tbl_ptr->reserved = 0;
-                link_tbl_ptr->buf_pool_id = 0;
-                link_tbl_ptr->offset = offset;
+                sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
+                                   sg_dma_len(sg), offset);
                 link_tbl_ptr++;
                 sg = sg_next(sg);
                 sg_count--;
         }
+        return link_tbl_ptr - 1;
+}
 
-        /* set Final bit (marks end of link table) */
-        link_tbl_ptr--;
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
+                                struct link_tbl_entry *link_tbl_ptr, u32 offset)
+{
+        link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
         link_tbl_ptr->len |= 0x40000000;
 }
 
 /*
- * fill in and submit ipsec_esp job descriptor
+ * Fill in aead job descriptor
  */
-static int init_aead_job(struct aead_edesc *edesc, struct aead_request *req,
-                         u32 encrypt,
-                         void (*callback) (struct device *dev, u32 *desc,
-                                           u32 err, void *context))
+static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+                          struct aead_edesc *edesc,
+                          struct aead_request *req,
+                          bool all_contig, bool encrypt)
 {
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
-        struct device *jrdev = ctx->jrdev;
-        u32 *desc = edesc->hw_desc, options;
-        int ret, sg_count, assoc_sg_count;
         int ivsize = crypto_aead_ivsize(aead);
         int authsize = ctx->authsize;
-        dma_addr_t ptr, dst_dma, src_dma;
-#ifdef DEBUG
-        u32 *sh_desc = ctx->sh_desc;
+        u32 *desc = edesc->hw_desc;
+        u32 out_options = 0, in_options;
+        dma_addr_t dst_dma, src_dma;
+        int len, link_tbl_index = 0;
 
+#ifdef DEBUG
         debug("assoclen %d cryptlen %d authsize %d\n",
               req->assoclen, req->cryptlen, authsize);
         print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                        req->assoclen , 1);
         print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
+                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                        edesc->src_nents ? 100 : ivsize, 1);
         print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-                       edesc->src_nents ? 100 : req->cryptlen + authsize, 1);
+                       edesc->src_nents ? 100 : req->cryptlen, 1);
         print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
                        desc_bytes(sh_desc), 1);
 #endif
-        assoc_sg_count = dma_map_sg(jrdev, req->assoc, edesc->assoc_nents ?: 1,
-                                    DMA_TO_DEVICE);
-        if (req->src == req->dst)
-                sg_count = dma_map_sg(jrdev, req->src, edesc->src_nents ? : 1,
-                                      DMA_BIDIRECTIONAL);
-        else
-                sg_count = dma_map_sg(jrdev, req->src, edesc->src_nents ? : 1,
-                                      DMA_TO_DEVICE);
 
-        /* start auth operation */
-        append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
-                         (encrypt ? : OP_ALG_ICV_ON));
+        len = desc_len(sh_desc);
+        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-        /* Load FIFO with data for Class 2 CHA */
-        options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
-        if (!edesc->assoc_nents) {
-                ptr = sg_dma_address(req->assoc);
+        if (all_contig) {
+                src_dma = sg_dma_address(req->assoc);
+                in_options = 0;
         } else {
-                sg_to_link_tbl(req->assoc, edesc->assoc_nents,
-                               edesc->link_tbl, 0);
-                ptr = edesc->link_tbl_dma;
-                options |= LDST_SGF;
+                src_dma = edesc->link_tbl_dma;
+                link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
+                                  (edesc->src_nents ? : 1);
+                in_options = LDST_SGF;
         }
-        append_fifo_load(desc, ptr, req->assoclen, options);
-
-        /* copy iv from cipher/class1 input context to class2 infifo */
-        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
-
-        if (!encrypt) {
-                u32 *jump_cmd, *uncond_jump_cmd;
-
-                /* JUMP if shared */
-                jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
-
-                /* start class 1 (cipher) operation, non-shared version */
-                append_operation(desc, ctx->class1_alg_type |
-                                 OP_ALG_AS_INITFINAL);
-
-                uncond_jump_cmd = append_jump(desc, 0);
-
-                set_jump_tgt_here(desc, jump_cmd);
-
-                /* start class 1 (cipher) operation, shared version */
-                append_operation(desc, ctx->class1_alg_type |
-                                 OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
-                set_jump_tgt_here(desc, uncond_jump_cmd);
-        } else
-                append_operation(desc, ctx->class1_alg_type |
-                                 OP_ALG_AS_INITFINAL | encrypt);
+        if (encrypt)
+                append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+                                  req->cryptlen - authsize, in_options);
+        else
+                append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+                                  req->cryptlen, in_options);
 
-        /* load payload & instruct to class2 to snoop class 1 if encrypting */
-        options = 0;
-        if (!edesc->src_nents) {
-                src_dma = sg_dma_address(req->src);
-        } else {
-                sg_to_link_tbl(req->src, edesc->src_nents, edesc->link_tbl +
-                               edesc->assoc_nents, 0);
-                src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
-                          sizeof(struct link_tbl_entry);
-                options |= LDST_SGF;
-        }
-        append_seq_in_ptr(desc, src_dma, req->cryptlen + authsize, options);
-        append_seq_fifo_load(desc, req->cryptlen, FIFOLD_CLASS_BOTH |
-                             FIFOLD_TYPE_LASTBOTH |
-                             (encrypt ? FIFOLD_TYPE_MSG1OUT2
-                                      : FIFOLD_TYPE_MSG));
-
-        /* specify destination */
-        if (req->src == req->dst) {
-                dst_dma = src_dma;
+        if (likely(req->src == req->dst)) {
+                if (all_contig) {
+                        dst_dma = sg_dma_address(req->src);
+                } else {
+                        dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+                                  ((edesc->assoc_nents ? : 1) + 1);
+                        out_options = LDST_SGF;
+                }
         } else {
-                sg_count = dma_map_sg(jrdev, req->dst, edesc->dst_nents ? : 1,
-                                      DMA_FROM_DEVICE);
                 if (!edesc->dst_nents) {
                         dst_dma = sg_dma_address(req->dst);
-                        options = 0;
                 } else {
-                        sg_to_link_tbl(req->dst, edesc->dst_nents,
-                                       edesc->link_tbl + edesc->assoc_nents +
-                                       edesc->src_nents, 0);
-                        dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
-                                  edesc->src_nents) *
+                        dst_dma = edesc->link_tbl_dma +
+                                  link_tbl_index *
                                   sizeof(struct link_tbl_entry);
-                        options = LDST_SGF;
+                        out_options = LDST_SGF;
                 }
         }
-        append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, options);
-        append_seq_fifo_store(desc, req->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
-
-        /* ICV */
         if (encrypt)
-                append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
-                                 LDST_SRCDST_BYTE_CONTEXT);
+                append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
         else
-                append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
-                                     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+                append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+                                   out_options);
+}
+
+/*
+ * Fill in aead givencrypt job descriptor
+ */
+static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
+                              struct aead_edesc *edesc,
+                              struct aead_request *req,
+                              int contig)
+{
+        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+        struct caam_ctx *ctx = crypto_aead_ctx(aead);
+        int ivsize = crypto_aead_ivsize(aead);
+        int authsize = ctx->authsize;
+        u32 *desc = edesc->hw_desc;
+        u32 out_options = 0, in_options;
+        dma_addr_t dst_dma, src_dma;
+        int len, link_tbl_index = 0;
 
 #ifdef DEBUG
-        debug("job_desc_len %d\n", desc_len(desc));
-        print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1);
-        print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
-                       edesc->link_tbl_bytes, 1);
+        debug("assoclen %d cryptlen %d authsize %d\n",
+              req->assoclen, req->cryptlen, authsize);
+        print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+                       req->assoclen , 1);
+        print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+        print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+                       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+        print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+                       desc_bytes(sh_desc), 1);
 #endif
 
-        ret = caam_jr_enqueue(jrdev, desc, callback, req);
-        if (!ret)
-                ret = -EINPROGRESS;
-        else {
-                aead_unmap(jrdev, edesc, req);
-                kfree(edesc);
+        len = desc_len(sh_desc);
+        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+        if (contig & GIV_SRC_CONTIG) {
+                src_dma = sg_dma_address(req->assoc);
+                in_options = 0;
+        } else {
+                src_dma = edesc->link_tbl_dma;
+                link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
+                in_options = LDST_SGF;
         }
+        append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+                          req->cryptlen - authsize, in_options);
 
-        return ret;
+        if (contig & GIV_DST_CONTIG) {
+                dst_dma = edesc->iv_dma;
+        } else {
+                if (likely(req->src == req->dst)) {
+                        dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+                                  edesc->assoc_nents;
+                        out_options = LDST_SGF;
+                } else {
+                        dst_dma = edesc->link_tbl_dma +
+                                  link_tbl_index *
+                                  sizeof(struct link_tbl_entry);
+                        out_options = LDST_SGF;
+                }
+        }
+
+        append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
 }
 
 /*
  * derive number of elements in scatterlist
  */
-static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
+static int sg_count(struct scatterlist *sg_list, int nbytes)
 {
         struct scatterlist *sg = sg_list;
         int sg_nents = 0;
 
-        *chained = 0;
         while (nbytes > 0) {
                 sg_nents++;
                 nbytes -= sg->length;
                 if (!sg_is_last(sg) && (sg + 1)->length == 0)
-                        *chained = 1;
+                        BUG(); /* Not support chaining */
                 sg = scatterwalk_sg_next(sg);
         }
 
+        if (likely(sg_nents == 1))
+                return 0;
+
         return sg_nents;
 }
 
 /*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-                                           int desc_bytes)
+                                           int desc_bytes, bool *all_contig_ptr)
 {
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
-                      GFP_ATOMIC;
-        int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
+        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+        int assoc_nents, src_nents, dst_nents = 0;
         struct aead_edesc *edesc;
+        dma_addr_t iv_dma = 0;
+        int sgc;
+        bool all_contig = true;
+        int ivsize = crypto_aead_ivsize(aead);
+        int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
 
-        assoc_nents = sg_count(req->assoc, req->assoclen, &chained);
-        BUG_ON(chained);
-        if (likely(assoc_nents == 1))
-                assoc_nents = 0;
-
-        src_nents = sg_count(req->src, req->cryptlen + ctx->authsize,
-                             &chained);
-        BUG_ON(chained);
-        if (src_nents == 1)
-                src_nents = 0;
-
-        if (unlikely(req->dst != req->src)) {
-                dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
-                                     &chained);
-                BUG_ON(chained);
-                if (dst_nents == 1)
-                        dst_nents = 0;
+        assoc_nents = sg_count(req->assoc, req->assoclen);
+        src_nents = sg_count(req->src, req->cryptlen);
+
+        if (unlikely(req->dst != req->src))
+                dst_nents = sg_count(req->dst, req->cryptlen);
+
+        sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+                         DMA_BIDIRECTIONAL);
+        if (likely(req->src == req->dst)) {
+                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+                                 DMA_BIDIRECTIONAL);
+        } else {
+                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+                                 DMA_TO_DEVICE);
+                sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+                                 DMA_FROM_DEVICE);
+        }
+
+        /* Check if data are contiguous */
+        iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+        if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+            iv_dma || src_nents || iv_dma + ivsize !=
+            sg_dma_address(req->src)) {
+                all_contig = false;
+                assoc_nents = assoc_nents ? : 1;
+                src_nents = src_nents ? : 1;
+                link_tbl_len = assoc_nents + 1 + src_nents;
         }
+        link_tbl_len += dst_nents;
 
-        link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
-                         sizeof(struct link_tbl_entry);
-        debug("link_tbl_bytes %d\n", link_tbl_bytes);
+        link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
 
         /* allocate space for base edesc and hw desc commands, link tables */
         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
@@ -753,11 +1061,34 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
         edesc->assoc_nents = assoc_nents;
         edesc->src_nents = src_nents;
         edesc->dst_nents = dst_nents;
+        edesc->iv_dma = iv_dma;
+        edesc->link_tbl_bytes = link_tbl_bytes;
         edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
                           desc_bytes;
         edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
                                              link_tbl_bytes, DMA_TO_DEVICE);
-        edesc->link_tbl_bytes = link_tbl_bytes;
+        *all_contig_ptr = all_contig;
+
+        link_tbl_index = 0;
+        if (!all_contig) {
+                sg_to_link_tbl(req->assoc,
+                               (assoc_nents ? : 1),
+                               edesc->link_tbl +
+                               link_tbl_index, 0);
+                link_tbl_index += assoc_nents ? : 1;
+                sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+                                   iv_dma, ivsize, 0);
+                link_tbl_index += 1;
+                sg_to_link_tbl_last(req->src,
+                                    (src_nents ? : 1),
+                                    edesc->link_tbl +
+                                    link_tbl_index, 0);
+                link_tbl_index += src_nents ? : 1;
+        }
+        if (dst_nents) {
+                sg_to_link_tbl_last(req->dst, dst_nents,
+                                    edesc->link_tbl + link_tbl_index, 0);
+        }
 
         return edesc;
 }
@@ -768,62 +1099,185 @@ static int aead_encrypt(struct aead_request *req)
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
         struct device *jrdev = ctx->jrdev;
-        int ivsize = crypto_aead_ivsize(aead);
+        bool all_contig;
         u32 *desc;
-        dma_addr_t iv_dma;
+        int ret = 0;
+
+        req->cryptlen += ctx->authsize;
 
         /* allocate extended descriptor */
-        edesc = aead_edesc_alloc(req, DESC_AEAD_ENCRYPT_TEXT_LEN *
-                                 CAAM_CMD_SZ);
+        edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+                                 CAAM_CMD_SZ, &all_contig);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
-        desc = edesc->hw_desc;
-
-        /* insert shared descriptor pointer */
-        init_job_desc_shared(desc, ctx->shared_desc_phys,
-                             desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
-
-        iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
-        /* check dma error */
+        /* Create and submit job descriptor */
+        init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+                      all_contig, true);
+#ifdef DEBUG
+        print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+                       desc_bytes(edesc->hw_desc), 1);
+#endif
 
-        append_load(desc, iv_dma, ivsize,
-                    LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+        desc = edesc->hw_desc;
+        ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+        if (!ret) {
+                ret = -EINPROGRESS;
+        } else {
+                aead_unmap(jrdev, edesc, req);
+                kfree(edesc);
+        }
 
-        return init_aead_job(edesc, req, OP_ALG_ENCRYPT, aead_encrypt_done);
+        return ret;
 }
 
 static int aead_decrypt(struct aead_request *req)
 {
+        struct aead_edesc *edesc;
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
-        int ivsize = crypto_aead_ivsize(aead);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
         struct device *jrdev = ctx->jrdev;
-        struct aead_edesc *edesc;
+        bool all_contig;
         u32 *desc;
-        dma_addr_t iv_dma;
-
-        req->cryptlen -= ctx->authsize;
+        int ret = 0;
 
         /* allocate extended descriptor */
-        edesc = aead_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
-                                 CAAM_CMD_SZ);
+        edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+                                 CAAM_CMD_SZ, &all_contig);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
+#ifdef DEBUG
+        print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+                       req->cryptlen, 1);
+#endif
+
+        /* Create and submit job descriptor*/
+        init_aead_job(ctx->sh_desc_dec,
+                      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+#ifdef DEBUG
+        print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+                       desc_bytes(edesc->hw_desc), 1);
+#endif
+
         desc = edesc->hw_desc;
+        ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+        if (!ret) {
+                ret = -EINPROGRESS;
+        } else {
+                aead_unmap(jrdev, edesc, req);
+                kfree(edesc);
+        }
815 | 1174 | ||
816 | /* insert shared descriptor pointer */ | 1175 | return ret; |
817 | init_job_desc_shared(desc, ctx->shared_desc_phys, | 1176 | } |
818 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | ||
819 | 1177 | ||
820 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); | 1178 | /* |
821 | /* check dma error */ | 1179 | * allocate and map the aead extended descriptor for aead givencrypt |
1180 | */ | ||
1181 | static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | ||
1182 | *greq, int desc_bytes, | ||
1183 | u32 *contig_ptr) | ||
1184 | { | ||
1185 | struct aead_request *req = &greq->areq; | ||
1186 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
1187 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
1188 | struct device *jrdev = ctx->jrdev; | ||
1189 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1190 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
1191 | int assoc_nents, src_nents, dst_nents = 0; | ||
1192 | struct aead_edesc *edesc; | ||
1193 | dma_addr_t iv_dma = 0; | ||
1194 | int sgc; | ||
1195 | u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; | ||
1196 | int ivsize = crypto_aead_ivsize(aead); | ||
1197 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; | ||
822 | 1198 | ||
823 | append_load(desc, iv_dma, ivsize, | 1199 | assoc_nents = sg_count(req->assoc, req->assoclen); |
824 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | 1200 | src_nents = sg_count(req->src, req->cryptlen); |
825 | 1201 | ||
826 | return init_aead_job(edesc, req, !OP_ALG_ENCRYPT, aead_decrypt_done); | 1202 | if (unlikely(req->dst != req->src)) |
1203 | dst_nents = sg_count(req->dst, req->cryptlen); | ||
1204 | |||
1205 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, | ||
1206 | DMA_BIDIRECTIONAL); | ||
1207 | if (likely(req->src == req->dst)) { | ||
1208 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1209 | DMA_BIDIRECTIONAL); | ||
1210 | } else { | ||
1211 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1212 | DMA_TO_DEVICE); | ||
1213 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | ||
1214 | DMA_FROM_DEVICE); | ||
1215 | } | ||
1216 | |||
1217 | /* Check if data are contiguous */ | ||
1218 | iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); | ||
1219 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != | ||
1220 | iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) | ||
1221 | contig &= ~GIV_SRC_CONTIG; | ||
1222 | if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) | ||
1223 | contig &= ~GIV_DST_CONTIG; | ||
1224 | if (unlikely(req->src != req->dst)) { | ||
1225 | dst_nents = dst_nents ? : 1; | ||
1226 | link_tbl_len += 1; | ||
1227 | } | ||
1228 | if (!(contig & GIV_SRC_CONTIG)) { | ||
1229 | assoc_nents = assoc_nents ? : 1; | ||
1230 | src_nents = src_nents ? : 1; | ||
1231 | link_tbl_len += assoc_nents + 1 + src_nents; | ||
1232 | if (likely(req->src == req->dst)) | ||
1233 | contig &= ~GIV_DST_CONTIG; | ||
1234 | } | ||
1235 | link_tbl_len += dst_nents; | ||
1236 | |||
1237 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | ||
1238 | |||
1239 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
1240 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | ||
1241 | link_tbl_bytes, GFP_DMA | flags); | ||
1242 | if (!edesc) { | ||
1243 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
1244 | return ERR_PTR(-ENOMEM); | ||
1245 | } | ||
1246 | |||
1247 | edesc->assoc_nents = assoc_nents; | ||
1248 | edesc->src_nents = src_nents; | ||
1249 | edesc->dst_nents = dst_nents; | ||
1250 | edesc->iv_dma = iv_dma; | ||
1251 | edesc->link_tbl_bytes = link_tbl_bytes; | ||
1252 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | ||
1253 | desc_bytes; | ||
1254 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | ||
1255 | link_tbl_bytes, DMA_TO_DEVICE); | ||
1256 | *contig_ptr = contig; | ||
1257 | |||
1258 | link_tbl_index = 0; | ||
1259 | if (!(contig & GIV_SRC_CONTIG)) { | ||
1260 | sg_to_link_tbl(req->assoc, assoc_nents, | ||
1261 | edesc->link_tbl + | ||
1262 | link_tbl_index, 0); | ||
1263 | link_tbl_index += assoc_nents; | ||
1264 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | ||
1265 | iv_dma, ivsize, 0); | ||
1266 | link_tbl_index += 1; | ||
1267 | sg_to_link_tbl_last(req->src, src_nents, | ||
1268 | edesc->link_tbl + | ||
1269 | link_tbl_index, 0); | ||
1270 | link_tbl_index += src_nents; | ||
1271 | } | ||
1272 | if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { | ||
1273 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | ||
1274 | iv_dma, ivsize, 0); | ||
1275 | link_tbl_index += 1; | ||
1276 | sg_to_link_tbl_last(req->dst, dst_nents, | ||
1277 | edesc->link_tbl + link_tbl_index, 0); | ||
1278 | } | ||
1279 | |||
1280 | return edesc; | ||
827 | } | 1281 | } |
828 | 1282 | ||
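The contig flags computed above drive init_aead_giv_job(), added earlier in the patch (not shown here): a contiguous side is wired straight into the job descriptor's sequence pointer, while a scattered side goes through the link table with the scatter/gather flag set. A sketch of that consumer, assuming append_seq_in_ptr() from desc_constr.h and the LDST_SGF flag from desc.h:

        dma_addr_t src_dma;
        u32 in_options;

        if (contig & GIV_SRC_CONTIG) {
                /* assoc | IV | src are back-to-back: one flat pointer */
                src_dma = sg_dma_address(req->assoc);
                in_options = 0;
        } else {
                /* hand the engine the link table instead */
                src_dma = edesc->link_tbl_dma;
                in_options = LDST_SGF;
        }
        append_seq_in_ptr(desc, src_dma,
                          req->assoclen + ivsize + req->cryptlen, in_options);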
829 | static int aead_givencrypt(struct aead_givcrypt_request *areq) | 1283 | static int aead_givencrypt(struct aead_givcrypt_request *areq) |
@@ -833,55 +1287,44 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) | |||
833 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1287 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
834 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1288 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
835 | struct device *jrdev = ctx->jrdev; | 1289 | struct device *jrdev = ctx->jrdev; |
836 | int ivsize = crypto_aead_ivsize(aead); | 1290 | u32 contig; |
837 | dma_addr_t iv_dma; | ||
838 | u32 *desc; | 1291 | u32 *desc; |
1292 | int ret = 0; | ||
839 | 1293 | ||
840 | iv_dma = dma_map_single(jrdev, areq->giv, ivsize, DMA_FROM_DEVICE); | 1294 | req->cryptlen += ctx->authsize; |
841 | |||
842 | debug("%s: giv %p\n", __func__, areq->giv); | ||
843 | 1295 | ||
844 | /* allocate extended descriptor */ | 1296 | /* allocate extended descriptor */ |
845 | edesc = aead_edesc_alloc(req, DESC_AEAD_GIVENCRYPT_TEXT_LEN * | 1297 | edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * |
846 | CAAM_CMD_SZ); | 1298 | CAAM_CMD_SZ, &contig); |
1299 | |||
847 | if (IS_ERR(edesc)) | 1300 | if (IS_ERR(edesc)) |
848 | return PTR_ERR(edesc); | 1301 | return PTR_ERR(edesc); |
849 | 1302 | ||
850 | desc = edesc->hw_desc; | 1303 | #ifdef DEBUG |
851 | 1304 | print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", | |
852 | /* insert shared descriptor pointer */ | 1305 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
853 | init_job_desc_shared(desc, ctx->shared_desc_phys, | 1306 | req->cryptlen, 1); |
854 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | 1307 | #endif |
855 | |||
856 | /* | ||
857 | * LOAD IMM Info FIFO | ||
858 | * to DECO, Last, Padding, Random, Message, 16 bytes | ||
859 | */ | ||
860 | append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 | | ||
861 | NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | | ||
862 | NFIFOENTRY_PTYPE_RND | ivsize, | ||
863 | LDST_SRCDST_WORD_INFO_FIFO); | ||
864 | |||
865 | /* | ||
866 | * disable info fifo entries since the above serves as the entry | ||
867 | * this way, the MOVE command won't generate an entry. | ||
868 | * Note that this isn't required in more recent versions of | ||
869 | * SEC as a MOVE that doesn't do info FIFO entries is available. | ||
870 | */ | ||
871 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
872 | |||
873 | /* MOVE DECO Alignment -> C1 Context 16 bytes */ | ||
874 | append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize); | ||
875 | |||
876 | /* re-enable info fifo entries */ | ||
877 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
878 | 1308 | ||
879 | /* MOVE C1 Context -> OFIFO 16 bytes */ | 1309 | /* Create and submit job descriptor */ |
880 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize); | 1310 | init_aead_giv_job(ctx->sh_desc_givenc, |
1311 | ctx->sh_desc_givenc_dma, edesc, req, contig); | ||
1312 | #ifdef DEBUG | ||
1313 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", | ||
1314 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1315 | desc_bytes(edesc->hw_desc), 1); | ||
1316 | #endif | ||
881 | 1317 | ||
882 | append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA); | 1318 | desc = edesc->hw_desc; |
1319 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); | ||
1320 | if (!ret) { | ||
1321 | ret = -EINPROGRESS; | ||
1322 | } else { | ||
1323 | aead_unmap(jrdev, edesc, req); | ||
1324 | kfree(edesc); | ||
1325 | } | ||
883 | 1326 | ||
884 | return init_aead_job(edesc, req, OP_ALG_ENCRYPT, aead_encrypt_done); | 1327 | return ret; |
885 | } | 1328 | } |
886 | 1329 | ||
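caam_jr_enqueue() returns 0 once the descriptor is queued on the job ring, so success is reported to the crypto API as -EINPROGRESS and all cleanup moves to the completion callback, which the job ring invokes with the raw CAAM status word. A sketch of the contract that aead_encrypt_done (earlier in this file) fulfils, with the status decoding simplified to a stand-in:

        static void aead_done_sketch(struct device *jrdev, u32 *desc, u32 err,
                                     void *context)
        {
                struct aead_request *req = context;
                struct aead_edesc *edesc;

                /* hw_desc was handed to the ring; step back to the edesc */
                edesc = (struct aead_edesc *)((char *)desc -
                        offsetof(struct aead_edesc, hw_desc));

                aead_unmap(jrdev, edesc, req);
                kfree(edesc);
                /* the real handler decodes err; -EIO is only a stand-in */
                aead_request_complete(req, err ? -EIO : 0);
        }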
887 | #define template_aead template_u.aead | 1330 | #define template_aead template_u.aead |
@@ -1120,16 +1563,19 @@ static void caam_cra_exit(struct crypto_tfm *tfm) | |||
1120 | { | 1563 | { |
1121 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | 1564 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
1122 | 1565 | ||
1123 | if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys)) | 1566 | if (ctx->sh_desc_enc_dma && |
1124 | dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys, | 1567 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) |
1125 | desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); | 1568 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, |
1126 | kfree(ctx->sh_desc); | 1569 | desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); |
1127 | 1570 | if (ctx->sh_desc_dec_dma && | |
1128 | if (!dma_mapping_error(ctx->jrdev, ctx->key_dma)) | 1571 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) |
1129 | dma_unmap_single(ctx->jrdev, ctx->key_dma, | 1572 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, |
1130 | ctx->split_key_pad_len + ctx->enckeylen, | 1573 | desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); |
1574 | if (ctx->sh_desc_givenc_dma && | ||
1575 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) | ||
1576 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, | ||
1577 | desc_bytes(ctx->sh_desc_givenc), | ||
1131 | DMA_TO_DEVICE); | 1578 | DMA_TO_DEVICE); |
1132 | kfree(ctx->key); | ||
1133 | } | 1579 | } |
1134 | 1580 | ||
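Each shared descriptor now carries its own DMA handle, and every unmap is guarded twice: a zero handle means setkey never reached that mapping, and dma_mapping_error() catches one that failed. The repeated pattern could be factored into a helper along these lines (hypothetical, not part of the patch):

        static void unmap_sh_desc(struct device *dev, u32 *sh_desc,
                                  dma_addr_t dma)
        {
                if (dma && !dma_mapping_error(dev, dma))
                        dma_unmap_single(dev, dma, desc_bytes(sh_desc),
                                         DMA_TO_DEVICE);
        }

so the exit path collapses to three one-line calls such as unmap_sh_desc(ctx->jrdev, ctx->sh_desc_enc, ctx->sh_desc_enc_dma).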
1135 | static void __exit caam_algapi_exit(void) | 1581 | static void __exit caam_algapi_exit(void) |
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 46915800c26f..0991323cf3fd 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #define IMMEDIATE (1 << 23) | 9 | #define IMMEDIATE (1 << 23) |
10 | #define CAAM_CMD_SZ sizeof(u32) | 10 | #define CAAM_CMD_SZ sizeof(u32) |
11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) | 11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) |
12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64) | 12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) |
13 | 13 | ||
14 | #ifdef DEBUG | 14 | #ifdef DEBUG |
15 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ | 15 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ |
@@ -18,6 +18,9 @@ | |||
18 | #define PRINT_POS | 18 | #define PRINT_POS |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \ | ||
22 | LDST_SRCDST_WORD_DECOCTRL | \ | ||
23 | (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT)) | ||
21 | #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ | 24 | #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ |
22 | LDST_SRCDST_WORD_DECOCTRL | \ | 25 | LDST_SRCDST_WORD_DECOCTRL | \ |
23 | (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) | 26 | (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) |
@@ -203,3 +206,56 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ | |||
203 | append_cmd(desc, immediate); \ | 206 | append_cmd(desc, immediate); \ |
204 | } | 207 | } |
205 | APPEND_CMD_RAW_IMM(load, LOAD, u32); | 208 | APPEND_CMD_RAW_IMM(load, LOAD, u32); |
209 | |||
210 | /* | ||
211 | * Append math command. Only the suffix of each destination and source | ||
212 | * operand name needs to be specified (e.g. REG0 for MATH_DEST_REG0) | ||
213 | */ | ||
214 | #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \ | ||
215 | append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \ | ||
216 | MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK)); | ||
217 | |||
218 | #define append_math_add(desc, dest, src0, src1, len) \ | ||
219 | APPEND_MATH(ADD, desc, dest, src0, src1, len) | ||
220 | #define append_math_sub(desc, dest, src0, src1, len) \ | ||
221 | APPEND_MATH(SUB, desc, dest, src0, src1, len) | ||
222 | #define append_math_add_c(desc, dest, src0, src1, len) \ | ||
223 | APPEND_MATH(ADDC, desc, dest, src0, src1, len) | ||
224 | #define append_math_sub_b(desc, dest, src0, src1, len) \ | ||
225 | APPEND_MATH(SUBB, desc, dest, src0, src1, len) | ||
226 | #define append_math_and(desc, dest, src0, src1, len) \ | ||
227 | APPEND_MATH(AND, desc, dest, src0, src1, len) | ||
228 | #define append_math_or(desc, dest, src0, src1, len) \ | ||
229 | APPEND_MATH(OR, desc, dest, src0, src1, len) | ||
230 | #define append_math_xor(desc, dest, src0, src1, len) \ | ||
231 | APPEND_MATH(XOR, desc, dest, src0, src1, len) | ||
232 | #define append_math_lshift(desc, dest, src0, src1, len) \ | ||
233 | APPEND_MATH(LSHIFT, desc, dest, src0, src1, len) | ||
234 | #define append_math_rshift(desc, dest, src0, src1, len) \ | ||
235 | APPEND_MATH(RSHIFT, desc, dest, src0, src1, len) | ||
236 | |||
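These wrappers are what the commit message refers to: they let the shared descriptors derive assoclen and cryptlen at run time from the DECO sequence lengths instead of baking them into the descriptor. An illustrative two-command fragment in that spirit, using operand names from desc.h (REG2, REG3, SEQINLEN, VARSEQINLEN, IMM); the exact recipes in caamalg.c vary per descriptor:

        /* REG2 = assoclen + cryptlen = seqinlen - ivsize */
        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
        /* VARSEQINLEN = assoclen = (assoclen + cryptlen) - cryptlen (REG3) */
        append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);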
237 | /* Exactly one source is IMM. Data is passed in as a u32 value */ | ||
238 | #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \ | ||
239 | do { \ | ||
240 | APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \ | ||
241 | append_cmd(desc, data); \ | ||
242 | } while (0) | ||
243 | |||
244 | #define append_math_add_imm_u32(desc, dest, src0, src1, data) \ | ||
245 | APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data) | ||
246 | #define append_math_sub_imm_u32(desc, dest, src0, src1, data) \ | ||
247 | APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data) | ||
248 | #define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \ | ||
249 | APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data) | ||
250 | #define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \ | ||
251 | APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data) | ||
252 | #define append_math_and_imm_u32(desc, dest, src0, src1, data) \ | ||
253 | APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data) | ||
254 | #define append_math_or_imm_u32(desc, dest, src0, src1, data) \ | ||
255 | APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data) | ||
256 | #define append_math_xor_imm_u32(desc, dest, src0, src1, data) \ | ||
257 | APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data) | ||
258 | #define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \ | ||
259 | APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data) | ||
260 | #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \ | ||
261 | APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data) | ||
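The _imm_u32 forms emit two descriptor words, the MATH command itself (length CAAM_CMD_SZ, one source selected as IMM) followed by the immediate value, which is why they expand to a do { } while (0) block rather than a single append_cmd(). One line in the style the givencrypt shared descriptor uses (ZERO and VARSEQOUTLEN per desc.h; the operand choice here is illustrative):

        /* variable output sequence length = 0 + ivsize */
        append_math_add_imm_u32(desc, VARSEQOUTLEN, ZERO, IMM, ivsize);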