Diffstat (limited to 'drivers/crypto/caam/caamalg.c')
-rw-r--r-- | drivers/crypto/caam/caamalg.c | 1268
1 file changed, 1268 insertions, 0 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644
index 000000000000..d0e65d6ddc77
--- /dev/null
+++ b/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,1268 @@
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 *                 ---------------                     ---------------
 *                 | JobDesc #1  |-------------------->|  ShareDesc  |
 *                 | *(packet 1) |                     |   (PDB)     |
 *                 ---------------      |------------->|  (hashKey)  |
 *                       .              |              | (cipherKey) |
 *                       .              |    |-------->| (operation) |
 *                 ---------------      |    |         ---------------
 *                 | JobDesc #2  |------|    |
 *                 | *(packet 2) |           |
 *                 ---------------           |
 *                       .                   |
 *                       .                   |
 *                 ---------------           |
 *                 | JobDesc #3  |------------
 *                 | *(packet 3) |
 *                 ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | LOAD (to DECO)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_SHARED_TEXT_LEN	4
#define DESC_AEAD_ENCRYPT_TEXT_LEN	21
#define DESC_AEAD_DECRYPT_TEXT_LEN	24
#define DESC_AEAD_GIVENCRYPT_TEXT_LEN	27
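
/*
 * These text lengths count 32-bit descriptor commands, not bytes; they are
 * multiplied by CAAM_CMD_SZ wherever descriptor space is budgeted below.
 * As a rough sanity check (assuming CAAM_CMD_SZ == 4), the largest job
 * descriptor text (givencrypt, 27 words) plus the shared descriptor text
 * (4 words) comes to 124 bytes before any key material is inlined.
 */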

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 *sh_desc;
	dma_addr_t shared_desc_phys;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 *key;
	dma_addr_t key_phys;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
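
/*
 * One caam_ctx lives in each tfm's context: it carries the session's keys
 * (split MDHA key plus raw cipher key, both virtual and DMA-mapped) and
 * its shared descriptor. It is filled in by aead_authenc_setkey() and
 * build_sh_desc_ipsec() below, and torn down in caam_cra_exit().
 */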

static int aead_authenc_setauthsize(struct crypto_aead *authenc,
				    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

struct split_key_result {
	struct completion completion;
	int err;
};

static void split_key_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct split_key_result *res = context;

#ifdef DEBUG
	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	res->err = err;

	complete(&res->completion);
}

/*
 * get a split ipad/opad key
 *
 * Split key generation-----------------------------------------------
 *
 * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
 * [01] 0x04000014        key: class2->keyreg len=20
 *			@0xffe01000
 * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
 * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
 * [05] 0xa4000001       jump: class2 local all ->1 [06]
 * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
 *			@0xffe04000
 */
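/*
 * (A note on the dump above: index [02] is not missing; the KEY command at
 * [01] carries a pointer, and pointer-bearing commands occupy two words.)
 */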
static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t dma_addr_in, dma_addr_out;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_in)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
		   KEY_DEST_CLASS_REG);

	/* Sets MDHA up into an HMAC-INIT */
	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
			 OP_ALG_AS_INIT);

	/*
	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
	 * into both pads inside MDHA
	 */
	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);

	/*
	 * FIFO_STORE with the explicit split-key content store
	 * (0x26 output type)
	 */
	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_out)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, dma_addr_in, authkeylen,
				 DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}
	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
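
	/*
	 * The split key comes back encrypted; going by the "mdsplit-jdk" in
	 * the dump above, FIFOST_TYPE_SPLIT_KEK appears to wrap it with the
	 * job-descriptor KEK, which is why it is later re-loaded with the
	 * KEY_ENC flag set in build_sh_desc_ipsec().
	 */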

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			       ctx->split_key_pad_len, 1);
#endif
	}

	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
			 DMA_FROM_DEVICE);
	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);

	kfree(desc);

	return ret;
}

static int build_sh_desc_ipsec(struct caam_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;
	u32 *sh_desc;
	u32 *jump_cmd;
	bool keys_fit_inline = false;

	/*
	 * largest Job Descriptor and its Shared Descriptor
	 * must both fit into the 64-word Descriptor h/w Buffer
	 */
	if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
	     DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
	    ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;
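
	/*
	 * Worked example, assuming CAAM_CMD_SZ == 4 and CAAM_DESC_BYTES_MAX
	 * == 256 (the 64-word buffer): for SHA1/AES, (27 + 4) * 4 bytes of
	 * descriptor text plus a 48-byte padded split key (2 * 20, aligned
	 * to 16) plus a 16-byte AES key gives 188 <= 256, so both keys can
	 * be inlined.
	 */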

	/* build shared descriptor for this session */
	sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
			  (keys_fit_inline ?
			   ctx->split_key_pad_len + ctx->enckeylen :
			   CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
	if (!sh_desc) {
		dev_err(jrdev, "could not allocate shared descriptor\n");
		return -ENOMEM;
	}

	init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);

	jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
			       JUMP_COND_SHRD | JUMP_COND_SELF);
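
	/*
	 * If the descriptor is already shared, the keys below are still
	 * resident in the CHAs from the previous job, so the conditional
	 * jump above skips over the key commands to the target set by
	 * set_jump_tgt_here() further down.
	 */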

	/*
	 * process keys, starting with class 2/authentication.
	 */
	if (keys_fit_inline) {
		append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len,
				  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);

		append_key_as_imm(sh_desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* update jump cmd now that we are at the jump target */
	set_jump_tgt_here(sh_desc, jump_cmd);

	ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
					       desc_bytes(sh_desc),
					       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		kfree(sh_desc);
		return -ENOMEM;
	}

	ctx->sh_desc = sh_desc;

	return 0;
}

static int aead_authenc_setkey(struct crypto_aead *aead,
			       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
			   GFP_KERNEL | GFP_DMA);
	if (!ctx->key) {
		dev_err(jrdev, "could not allocate key output memory\n");
		return -ENOMEM;
	}

	ret = gen_split_key(ctx, key, authkeylen);
	if (ret) {
		kfree(ctx->key);
		goto badkey;
	}

	/* append the encryption key to the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				       enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_phys)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		kfree(ctx->key);
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = build_sh_desc_ipsec(ctx);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
		kfree(ctx->key);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

struct link_tbl_entry {
	u64 ptr;
	u32 len;
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;
};
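
/*
 * This is the hardware's scatter/gather ("link table") entry: a dense array
 * of these 16-byte records describes one logical buffer, with the end of
 * the table marked by the Final bit (bit 30 of len, the 0x40000000 written
 * in sg_to_link_tbl() below).
 */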

/*
 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @link_tbl: pointer to the h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *           MAX_CAAM_DESCSIZE), followed by any referenced link tables
 */
struct ipsec_esp_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};
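
/*
 * A single kmalloc in ipsec_esp_edesc_alloc() lays these out back to back:
 *
 *   [ struct ipsec_esp_edesc | hw_desc (desc_bytes) | link table entries ]
 *
 * so hw_desc[] runs off the end of the struct and link_tbl points
 * desc_bytes past it.
 */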

static void ipsec_esp_unmap(struct device *dev,
			    struct ipsec_esp_edesc *edesc,
			    struct aead_request *areq)
{
	dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents ? : 1,
		     DMA_TO_DEVICE);

	if (unlikely(areq->dst != areq->src)) {
		dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
			     DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
			     DMA_BIDIRECTIONAL);
	}

	if (edesc->link_tbl_bytes)
		dma_unmap_single(dev, edesc->link_tbl_dma,
				 edesc->link_tbl_bytes,
				 DMA_TO_DEVICE);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct aead_request *areq = context;
	struct ipsec_esp_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	edesc = (struct ipsec_esp_edesc *)((char *)desc -
		 offsetof(struct ipsec_esp_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ipsec_esp_unmap(jrdev, edesc, areq);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
		       areq->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
		       edesc->src_nents ? 100 : areq->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct aead_request *areq = context;
	struct ipsec_esp_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	edesc = (struct ipsec_esp_edesc *)((char *)desc -
		 offsetof(struct ipsec_esp_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ipsec_esp_unmap(jrdev, edesc, areq);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + areq->assoclen +
		       ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->link_tbl_bytes) {
		struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif
	kfree(edesc);

	aead_request_complete(areq, err);
}

/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			   struct link_tbl_entry *link_tbl_ptr, u32 offset)
{
	while (sg_count) {
		link_tbl_ptr->ptr = sg_dma_address(sg);
		link_tbl_ptr->len = sg_dma_len(sg);
		link_tbl_ptr->reserved = 0;
		link_tbl_ptr->buf_pool_id = 0;
		link_tbl_ptr->offset = offset;
		link_tbl_ptr++;
		sg = sg_next(sg);
		sg_count--;
	}

	/* set Final bit (marks end of link table) */
	link_tbl_ptr--;
	link_tbl_ptr->len |= 0x40000000;
}

/*
 * fill in and submit ipsec_esp job descriptor
 */
static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
		     u32 encrypt,
		     void (*callback) (struct device *dev, u32 *desc,
				       u32 err, void *context))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = edesc->hw_desc, options;
	int ret, sg_count, assoc_sg_count;
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	dma_addr_t ptr, dst_dma, src_dma;
#ifdef DEBUG
	u32 *sh_desc = ctx->sh_desc;

	debug("assoclen %d cryptlen %d authsize %d\n",
	      areq->assoclen, areq->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
		       areq->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
		       edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif
	assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
				    DMA_TO_DEVICE);
	if (areq->src == areq->dst)
		sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
				      DMA_BIDIRECTIONAL);
	else
		sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
				      DMA_TO_DEVICE);

	/*
	 * start auth operation; when decrypting, encrypt is 0, so the GNU
	 * "?:" default supplies OP_ALG_ICV_ON, telling the class 2 CHA to
	 * check the trailing ICV rather than generate one
	 */
	append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
			 (encrypt ? : OP_ALG_ICV_ON));

	/* Load FIFO with data for Class 2 CHA */
	options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
	if (!edesc->assoc_nents) {
		ptr = sg_dma_address(areq->assoc);
	} else {
		sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
			       edesc->link_tbl, 0);
		ptr = edesc->link_tbl_dma;
		options |= LDST_SGF;
	}
	append_fifo_load(desc, ptr, areq->assoclen, options);

	/* copy iv from cipher/class1 input context to class2 infifo */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);

	if (!encrypt) {
		u32 *jump_cmd, *uncond_jump_cmd;

		/* JUMP if shared */
		jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);

		/* start class 1 (cipher) operation, non-shared version */
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL);

		uncond_jump_cmd = append_jump(desc, 0);

		set_jump_tgt_here(desc, jump_cmd);

		/* start class 1 (cipher) operation, shared version */
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
		set_jump_tgt_here(desc, uncond_jump_cmd);
	} else
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | encrypt);

	/* load payload & instruct class2 to snoop class 1 if encrypting */
	options = 0;
	if (!edesc->src_nents) {
		src_dma = sg_dma_address(areq->src);
	} else {
		sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
			       edesc->assoc_nents, 0);
		src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
			  sizeof(struct link_tbl_entry);
		options |= LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
	append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
			     FIFOLD_TYPE_LASTBOTH |
			     (encrypt ? FIFOLD_TYPE_MSG1OUT2
				      : FIFOLD_TYPE_MSG));

	/* specify destination */
	if (areq->src == areq->dst) {
		dst_dma = src_dma;
	} else {
		sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(areq->dst);
			options = 0;
		} else {
			sg_to_link_tbl(areq->dst, edesc->dst_nents,
				       edesc->link_tbl + edesc->assoc_nents +
				       edesc->src_nents, 0);
			dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
				  edesc->src_nents) *
				  sizeof(struct link_tbl_entry);
			options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
	append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);

	/* ICV */
	if (encrypt)
		append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
				 LDST_SRCDST_BYTE_CONTEXT);
	else
		append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
				     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

#ifdef DEBUG
	debug("job_desc_len %d\n", desc_len(desc));
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
	print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
		       edesc->link_tbl_bytes, 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, callback, areq);
	if (!ret)
		ret = -EINPROGRESS;
	else {
		ipsec_esp_unmap(jrdev, edesc, areq);
		kfree(edesc);
	}

	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = 0;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		/* a zero-length entry before the last marks a chained list */
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = 1;
		sg = scatterwalk_sg_next(sg);
	}

	return sg_nents;
}

/*
 * allocate and map the ipsec_esp extended descriptor
 */
static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
						     int desc_bytes)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
	struct ipsec_esp_edesc *edesc;

	assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
	BUG_ON(chained);
	if (likely(assoc_nents == 1))
		assoc_nents = 0;

	src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
			     &chained);
	BUG_ON(chained);
	if (src_nents == 1)
		src_nents = 0;

	if (unlikely(areq->dst != areq->src)) {
		dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
				     &chained);
		BUG_ON(chained);
		if (dst_nents == 1)
			dst_nents = 0;
	}

	link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
			 sizeof(struct link_tbl_entry);
	debug("link_tbl_bytes %d\n", link_tbl_bytes);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	edesc->link_tbl_bytes = link_tbl_bytes;

	return edesc;
}

static int aead_authenc_encrypt(struct aead_request *areq)
{
	struct ipsec_esp_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	dma_addr_t iv_dma;

	/* allocate extended descriptor */
	edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN *
				      CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	desc = edesc->hw_desc;

	/* insert shared descriptor pointer */
	init_job_desc_shared(desc, ctx->shared_desc_phys,
			     desc_len(ctx->sh_desc), HDR_SHARE_DEFER);

	iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		if (edesc->link_tbl_bytes)
			dma_unmap_single(jrdev, edesc->link_tbl_dma,
					 edesc->link_tbl_bytes, DMA_TO_DEVICE);
		kfree(edesc);
		return -ENOMEM;
	}

	append_load(desc, iv_dma, ivsize,
		    LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);

	return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
}

static int aead_authenc_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct ipsec_esp_edesc *edesc;
	u32 *desc;
	dma_addr_t iv_dma;

	req->cryptlen -= ctx->authsize;

	/* allocate extended descriptor */
	edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
				      CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	desc = edesc->hw_desc;

	/* insert shared descriptor pointer */
	init_job_desc_shared(desc, ctx->shared_desc_phys,
			     desc_len(ctx->sh_desc), HDR_SHARE_DEFER);

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		if (edesc->link_tbl_bytes)
			dma_unmap_single(jrdev, edesc->link_tbl_dma,
					 edesc->link_tbl_bytes, DMA_TO_DEVICE);
		kfree(edesc);
		return -ENOMEM;
	}

	append_load(desc, iv_dma, ivsize,
		    LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);

	return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
}

static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct ipsec_esp_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ivsize = crypto_aead_ivsize(aead);
	dma_addr_t iv_dma;
	u32 *desc;

	iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);

	debug("%s: giv %p\n", __func__, req->giv);

	/* allocate extended descriptor */
	edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
				      CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	desc = edesc->hw_desc;

	/* insert shared descriptor pointer */
	init_job_desc_shared(desc, ctx->shared_desc_phys,
			     desc_len(ctx->sh_desc), HDR_SHARE_DEFER);

	/*
	 * LOAD IMM Info FIFO
	 * to DECO, Last, Padding, Random, Message, 16 bytes
	 */
	append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
			    NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
			    NFIFOENTRY_PTYPE_RND | ivsize,
			    LDST_SRCDST_WORD_INFO_FIFO);

	/*
	 * disable info fifo entries since the above serves as the entry
	 * this way, the MOVE command won't generate an entry.
	 * Note that this isn't required in more recent versions of
	 * SEC as a MOVE that doesn't do info FIFO entries is available.
	 */
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);

	/* MOVE DECO Alignment -> C1 Context 16 bytes */
	append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);

	/* re-enable info fifo entries */
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* MOVE C1 Context -> OFIFO 16 bytes */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);

	append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);

	return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
}
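
/*
 * Net effect of the givencrypt sequence above: ivsize bytes of random
 * padding are pushed through the info FIFO into the class 1 context, where
 * they serve as the CBC IV, and the same context bytes are moved out through
 * the output FIFO to req->giv, so IV generation costs no separate RNG job.
 */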

struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct aead_alg aead;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
};
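
/*
 * The template list above enumerates the nine single-pass ipsec_esp
 * combinations supported so far: {AES, 3DES, DES}-CBC crossed with
 * HMAC-{SHA1, SHA256, SHA512}.
 */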

struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
		dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
				 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
	kfree(ctx->sh_desc);

	if (!dma_mapping_error(ctx->jrdev, ctx->key_phys))
		dma_unmap_single(ctx->jrdev, ctx->key_phys,
				 ctx->split_key_pad_len + ctx->enckeylen,
				 DMA_TO_DEVICE);
	kfree(ctx->key);
}

static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;
	int i, err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_deregister(priv->algapi_jr[i]);
		if (err < 0)
			break;
	}
	kfree(priv->algapi_jr);
}

static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_type = &crypto_aead_type;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_u.aead = template->aead;

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev, **jrdev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return -ENODEV;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->alg_list);

	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
	if (!jrdev)
		return -ENOMEM;

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_register(ctrldev, &jrdev[i]);
		if (err < 0)
			break;
	}
	if (err < 0 && i == 0) {
		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
			err);
		kfree(jrdev);
		return err;
	}

	priv->num_jrs_for_algapi = i;
	priv->algapi_jr = jrdev;
	/* start at -1 so the first tfm to init lands on job ring 0 */
	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &priv->alg_list);
			dev_info(ctrldev, "%s\n",
				 t_alg->crypto_alg.cra_driver_name);
		}
	}

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");