author     Hariprasad Shenai <hariprasad@chelsio.com>  2016-08-17 03:03:05 -0400
committer  David S. Miller <davem@davemloft.net>       2016-08-19 02:59:30 -0400
commit     324429d74127d0cc8a1a42d20035f8f986149ec4 (patch)
tree       05d9d8e48f8de04eea8640319630ce4e40b30660
parent     d6657781b5a9d2e2c72da1af0d185899b2d7e2f1 (diff)
chcr: Support for Chelsio's Crypto Hardware
Chelsio's crypto hardware can perform the following operations: SHA1, SHA224, SHA256, SHA384 and SHA512, HMAC(SHA1), HMAC(SHA224), HMAC(SHA256), HMAC(SHA384), HMAC(SHA512), AES-128-CBC, AES-192-CBC, AES-256-CBC, AES-128-XTS and AES-256-XTS. This patch implements the driver for the above features.

The driver is an Upper Layer Driver (ULD) that attaches to Chelsio's lower layer driver (cxgb4) and uses the queues allocated by the LLD to send crypto requests to the hardware and to receive its responses. The crypto operations can be performed on Chelsio's hardware from userspace applications and/or from within kernel space through the kernel's crypto API.

The above crypto features have been tested with the kernel's self-tests in testmgr.h, and from user space using libkcapi and OpenSSL.

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
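
[Editor's note] The user-space path mentioned above (the one libkcapi wraps) goes through the kernel's AF_ALG socket interface. The sketch below is an illustration only and not part of the patch: it computes a SHA-256 digest over a short buffer, assumes CONFIG_CRYPTO_USER_API_HASH is enabled, omits most error handling, and whether the request is actually served by this driver depends on algorithm priority (look for "sha256-chcr" in /proc/crypto).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        /* "sha256" is resolved by priority; with chcr loaded it may be
         * backed by "sha256-chcr" (see /proc/crypto). */
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha256",
        };
        unsigned char digest[32];
        const char msg[] = "hello, chcr";
        int tfmfd, opfd, i;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)))
                return 1;
        opfd = accept(tfmfd, NULL, 0);
        if (opfd < 0)
                return 1;

        /* send the message, then read back the 32-byte digest */
        if (write(opfd, msg, strlen(msg)) < 0 ||
            read(opfd, digest, sizeof(digest)) != sizeof(digest))
                return 1;

        for (i = 0; i < (int)sizeof(digest); i++)
                printf("%02x", digest[i]);
        printf("\n");

        close(opfd);
        close(tfmfd);
        return 0;
}

The same socket interface covers the ciphers registered by this patch (salg_type "skcipher" with "cbc(aes)" or "xts(aes)"), with an additional ALG_SET_KEY setsockopt plus ALG_SET_OP/ALG_SET_IV control messages on sendmsg().
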
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c    1525
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h     471
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c     240
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h      80
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h   203
5 files changed, 2519 insertions, 0 deletions
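
[Editor's note] For the in-kernel path referenced in the commit message above, the fragment below is a rough sketch, not taken from this patch, of submitting an asynchronous SHA-256 digest through the ahash API of this kernel era. example_sha256() and example_complete() are made-up names, the data buffer must be DMA-able (not on the stack) because a hardware provider such as sha256-chcr may DMA from it, and the request only reaches this driver if it is the highest-priority "sha256" implementation.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>

struct example_result {
        struct completion completion;
        int err;
};

/* completion callback invoked by the crypto core for the async request */
static void example_complete(struct crypto_async_request *req, int err)
{
        struct example_result *res = req->data;

        if (err == -EINPROGRESS)        /* backlogged request has started */
                return;
        res->err = err;
        complete(&res->completion);
}

static int example_sha256(const u8 *data, unsigned int len, u8 *digest)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        struct example_result res;
        int ret;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        init_completion(&res.completion);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   example_complete, &res);
        sg_init_one(&sg, data, len);
        ahash_request_set_crypt(req, &sg, digest, len);

        ret = crypto_ahash_digest(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&res.completion);
                ret = res.err;
        }

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}

The ciphers registered below ("cbc(aes)", "xts(aes)") follow the same request/callback pattern through the ablkcipher interface this driver implements.
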
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
new file mode 100644
index 000000000000..ad8e353cf897
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -0,0 +1,1525 @@
1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * Written and Maintained by:
35 * Manoj Malviya (manojmalviya@chelsio.com)
36 * Atul Gupta (atul.gupta@chelsio.com)
37 * Jitendra Lulla (jlulla@chelsio.com)
38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39 * Harsh Jain (harsh@chelsio.com)
40 */
41
42#define pr_fmt(fmt) "chcr:" fmt
43
44#include <linux/kernel.h>
45#include <linux/module.h>
46#include <linux/crypto.h>
47#include <linux/cryptohash.h>
48#include <linux/skbuff.h>
49#include <linux/rtnetlink.h>
50#include <linux/highmem.h>
51#include <linux/scatterlist.h>
52
53#include <crypto/aes.h>
54#include <crypto/algapi.h>
55#include <crypto/hash.h>
56#include <crypto/sha.h>
57#include <crypto/internal/hash.h>
58
59#include "t4fw_api.h"
60#include "t4_msg.h"
61#include "chcr_core.h"
62#include "chcr_algo.h"
63#include "chcr_crypto.h"
64
65static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
66{
67 return ctx->crypto_ctx->ablkctx;
68}
69
70static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
71{
72 return ctx->crypto_ctx->hmacctx;
73}
74
75static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
76{
77 return ctx->dev->u_ctx;
78}
79
80static inline int is_ofld_imm(const struct sk_buff *skb)
81{
82 return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
83}
84
85/*
86 * sgl_len - calculates the size of an SGL of the given capacity
87 * @n: the number of SGL entries
88 * Calculates the number of flits needed for a scatter/gather list that
89 * can hold the given number of entries.
90 */
91static inline unsigned int sgl_len(unsigned int n)
92{
93 n--;
94 return (3 * n) / 2 + (n & 1) + 2;
95}
96
97/*
98 * chcr_handle_resp - Unmap the DMA buffers associated with the request
99 * @req: crypto request
100 */
101int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
102 int error_status)
103{
104 struct crypto_tfm *tfm = req->tfm;
105 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
106 struct uld_ctx *u_ctx = ULD_CTX(ctx);
107 struct chcr_req_ctx ctx_req;
108 struct cpl_fw6_pld *fw6_pld;
109 unsigned int digestsize, updated_digestsize;
110
111 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
112 case CRYPTO_ALG_TYPE_BLKCIPHER:
113 ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
114 ctx_req.ctx.ablk_ctx =
115 ablkcipher_request_ctx(ctx_req.req.ablk_req);
116 if (!error_status) {
117 fw6_pld = (struct cpl_fw6_pld *)input;
118 memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
119 AES_BLOCK_SIZE);
120 }
121 dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
122 ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
123 if (ctx_req.ctx.ablk_ctx->skb) {
124 kfree_skb(ctx_req.ctx.ablk_ctx->skb);
125 ctx_req.ctx.ablk_ctx->skb = NULL;
126 }
127 break;
128
129 case CRYPTO_ALG_TYPE_AHASH:
130 ctx_req.req.ahash_req = (struct ahash_request *)req;
131 ctx_req.ctx.ahash_ctx =
132 ahash_request_ctx(ctx_req.req.ahash_req);
133 digestsize =
134 crypto_ahash_digestsize(crypto_ahash_reqtfm(
135 ctx_req.req.ahash_req));
136 updated_digestsize = digestsize;
137 if (digestsize == SHA224_DIGEST_SIZE)
138 updated_digestsize = SHA256_DIGEST_SIZE;
139 else if (digestsize == SHA384_DIGEST_SIZE)
140 updated_digestsize = SHA512_DIGEST_SIZE;
141 if (ctx_req.ctx.ahash_ctx->skb)
142 ctx_req.ctx.ahash_ctx->skb = NULL;
143 if (ctx_req.ctx.ahash_ctx->result == 1) {
144 ctx_req.ctx.ahash_ctx->result = 0;
145 memcpy(ctx_req.req.ahash_req->result, input +
146 sizeof(struct cpl_fw6_pld),
147 digestsize);
148 } else {
149 memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
150 sizeof(struct cpl_fw6_pld),
151 updated_digestsize);
152 }
153 kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
154 ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
155 break;
156 }
157 return 0;
158}
159
160/*
161 * calc_tx_flits_ofld - calculate # of flits for an offload packet
162 * @skb: the packet
163 * Returns the number of flits needed for the given offload packet.
164 * These packets are already fully constructed and no additional headers
165 * will be added.
166 */
167static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
168{
169 unsigned int flits, cnt;
170
171 if (is_ofld_imm(skb))
172 return DIV_ROUND_UP(skb->len, 8);
173
174 flits = skb_transport_offset(skb) / 8; /* headers */
175 cnt = skb_shinfo(skb)->nr_frags;
176 if (skb_tail_pointer(skb) != skb_transport_header(skb))
177 cnt++;
178 return flits + sgl_len(cnt);
179}
180
181static struct shash_desc *chcr_alloc_shash(unsigned int ds)
182{
183 struct crypto_shash *base_hash = NULL;
184 struct shash_desc *desc;
185
186 switch (ds) {
187 case SHA1_DIGEST_SIZE:
188 base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
189 break;
190 case SHA224_DIGEST_SIZE:
191 base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
192 break;
193 case SHA256_DIGEST_SIZE:
194 base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
195 break;
196 case SHA384_DIGEST_SIZE:
197 base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
198 break;
199 case SHA512_DIGEST_SIZE:
200 base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
201 break;
202 }
203 if (IS_ERR(base_hash)) {
204 pr_err("Can not allocate sha-generic algo.\n");
205 return (void *)base_hash;
206 }
207
208 desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
209 GFP_KERNEL);
210 if (!desc)
211 return ERR_PTR(-ENOMEM);
212 desc->tfm = base_hash;
213 desc->flags = crypto_shash_get_flags(base_hash);
214 return desc;
215}
216
217static int chcr_compute_partial_hash(struct shash_desc *desc,
218 char *iopad, char *result_hash,
219 int digest_size)
220{
221 struct sha1_state sha1_st;
222 struct sha256_state sha256_st;
223 struct sha512_state sha512_st;
224 int error;
225
226 if (digest_size == SHA1_DIGEST_SIZE) {
227 error = crypto_shash_init(desc) ?:
228 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
229 crypto_shash_export(desc, (void *)&sha1_st);
230 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
231 } else if (digest_size == SHA224_DIGEST_SIZE) {
232 error = crypto_shash_init(desc) ?:
233 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
234 crypto_shash_export(desc, (void *)&sha256_st);
235 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
236
237 } else if (digest_size == SHA256_DIGEST_SIZE) {
238 error = crypto_shash_init(desc) ?:
239 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
240 crypto_shash_export(desc, (void *)&sha256_st);
241 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
242
243 } else if (digest_size == SHA384_DIGEST_SIZE) {
244 error = crypto_shash_init(desc) ?:
245 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
246 crypto_shash_export(desc, (void *)&sha512_st);
247 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
248
249 } else if (digest_size == SHA512_DIGEST_SIZE) {
250 error = crypto_shash_init(desc) ?:
251 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
252 crypto_shash_export(desc, (void *)&sha512_st);
253 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
254 } else {
255 error = -EINVAL;
256 pr_err("Unknown digest size %d\n", digest_size);
257 }
258 return error;
259}
260
261static void chcr_change_order(char *buf, int ds)
262{
263 int i;
264
265 if (ds == SHA512_DIGEST_SIZE) {
266 for (i = 0; i < (ds / sizeof(u64)); i++)
267 *((__be64 *)buf + i) =
268 cpu_to_be64(*((u64 *)buf + i));
269 } else {
270 for (i = 0; i < (ds / sizeof(u32)); i++)
271 *((__be32 *)buf + i) =
272 cpu_to_be32(*((u32 *)buf + i));
273 }
274}
275
276static inline int is_hmac(struct crypto_tfm *tfm)
277{
278 struct crypto_alg *alg = tfm->__crt_alg;
279 struct chcr_alg_template *chcr_crypto_alg =
280 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
281 alg.hash);
282 if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
283 CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
284 return 1;
285 return 0;
286}
287
288static inline unsigned int ch_nents(struct scatterlist *sg,
289 unsigned int *total_size)
290{
291 unsigned int nents;
292
293 for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
294 nents++;
295 *total_size += sg->length;
296 }
297 return nents;
298}
299
300static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
301 struct scatterlist *sg,
302 struct phys_sge_parm *sg_param)
303{
304 struct phys_sge_pairs *to;
305 unsigned int out_buf_size = sg_param->obsize;
306 unsigned int nents = sg_param->nents, i, j, tot_len = 0;
307
308 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
309 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
310 phys_cpl->pcirlxorder_to_noofsgentr =
311 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
312 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
313 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
314 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
315 CPL_RX_PHYS_DSGL_DCAID_V(0) |
316 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
317 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
318 phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
319 phys_cpl->rss_hdr_int.hash_val = 0;
320 to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
321 sizeof(struct cpl_rx_phys_dsgl));
322
323 for (i = 0; nents; to++) {
324 for (j = i; (nents && (j < (8 + i))); j++, nents--) {
325 to->len[j] = htons(sg->length);
326 to->addr[j] = cpu_to_be64(sg_dma_address(sg));
327 if (out_buf_size) {
328 if (tot_len + sg_dma_len(sg) >= out_buf_size) {
329 to->len[j] = htons(out_buf_size -
330 tot_len);
331 return;
332 }
333 tot_len += sg_dma_len(sg);
334 }
335 sg = sg_next(sg);
336 }
337 }
338}
339
340static inline unsigned
341int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
342 struct scatterlist *sg, struct phys_sge_parm *sg_param)
343{
344 if (!sg || !sg_param->nents)
345 return 0;
346
347 sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
348 if (sg_param->nents == 0) {
349 pr_err("CHCR : DMA mapping failed\n");
350 return -EINVAL;
351 }
352 write_phys_cpl(phys_cpl, sg, sg_param);
353 return 0;
354}
355
356static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
357{
358 struct crypto_alg *alg = tfm->__crt_alg;
359 struct chcr_alg_template *chcr_crypto_alg =
360 container_of(alg, struct chcr_alg_template, alg.crypto);
361
362 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
363}
364
365static inline void
366write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
367 struct scatterlist *sg, unsigned int count)
368{
369 struct page *spage;
370 unsigned int page_len;
371
372 skb->len += count;
373 skb->data_len += count;
374 skb->truesize += count;
375 while (count > 0) {
376 if (sg && (!(sg->length)))
377 break;
378 spage = sg_page(sg);
379 get_page(spage);
380 page_len = min(sg->length, count);
381 skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
382 (*frags)++;
383 count -= page_len;
384 sg = sg_next(sg);
385 }
386}
387
388static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
389 struct _key_ctx *key_ctx)
390{
391 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
392 get_aes_decrypt_key(key_ctx->key, ablkctx->key,
393 ablkctx->enckey_len << 3);
394 memset(key_ctx->key + ablkctx->enckey_len, 0,
395 CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
396 } else {
397 memcpy(key_ctx->key,
398 ablkctx->key + (ablkctx->enckey_len >> 1),
399 ablkctx->enckey_len >> 1);
400 get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
401 ablkctx->key, ablkctx->enckey_len << 2);
402 }
403 return 0;
404}
405
406static inline void create_wreq(struct chcr_context *ctx,
407 struct fw_crypto_lookaside_wr *wreq,
408 void *req, struct sk_buff *skb,
409 int kctx_len, int hash_sz,
410 unsigned int phys_dsgl)
411{
412 struct uld_ctx *u_ctx = ULD_CTX(ctx);
413 struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
414 struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
415 int iv_loc = IV_DSGL;
416 int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
417 unsigned int immdatalen = 0, nr_frags = 0;
418
419 if (is_ofld_imm(skb)) {
420 immdatalen = skb->data_len;
421 iv_loc = IV_IMMEDIATE;
422 } else {
423 nr_frags = skb_shinfo(skb)->nr_frags;
424 }
425
426 wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
427 (kctx_len >> 4));
428 wreq->pld_size_hash_size =
429 htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
430 FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
431 wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
432 (calc_tx_flits_ofld(skb) * 8), 16)));
433 wreq->cookie = cpu_to_be64((uintptr_t)req);
434 wreq->rx_chid_to_rx_q_id =
435 FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
436 (hash_sz) ? IV_NOP : iv_loc);
437
438 ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
439 ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
440 16) - ((sizeof(*wreq)) >> 4)));
441
442 sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
443 sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
444 ((hash_sz) ? DUMMY_BYTES :
445 (sizeof(struct cpl_rx_phys_dsgl) +
446 phys_dsgl)) + immdatalen);
447}
448
449/**
450 * create_cipher_wr - form the WR for cipher operations
451 * @req_base: async crypto request that carries the cipher request.
452 * @ctx: crypto driver context of the request.
453 * @qid: ingress qid where response of this WR should be received.
454 * @op_type: encryption or decryption
455 */
456static struct sk_buff
457*create_cipher_wr(struct crypto_async_request *req_base,
458 struct chcr_context *ctx, unsigned short qid,
459 unsigned short op_type)
460{
461 struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
462 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
463 struct uld_ctx *u_ctx = ULD_CTX(ctx);
464 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
465 struct sk_buff *skb = NULL;
466 struct _key_ctx *key_ctx;
467 struct fw_crypto_lookaside_wr *wreq;
468 struct cpl_tx_sec_pdu *sec_cpl;
469 struct cpl_rx_phys_dsgl *phys_cpl;
470 struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
471 struct phys_sge_parm sg_param;
472 unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
473 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
474
475 if (!req->info)
476 return ERR_PTR(-EINVAL);
477 ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
478 ablkctx->enc = op_type;
479
480 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
481 (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
482 return ERR_PTR(-EINVAL);
483
484 phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
485
486 kctx_len = sizeof(*key_ctx) +
487 (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
488 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
489 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
490 GFP_ATOMIC);
491 if (!skb)
492 return ERR_PTR(-ENOMEM);
493 skb_reserve(skb, sizeof(struct sge_opaque_hdr));
494 wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
495
496 sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
497 sec_cpl->op_ivinsrtofst =
498 FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
499
500 sec_cpl->pldlen = htonl(ivsize + req->nbytes);
501 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
502 ivsize + 1, 0);
503
504 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
505 0, 0);
506 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
507 ablkctx->ciph_mode,
508 0, 0, ivsize >> 1, 1);
509 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
510 0, 1, phys_dsgl);
511
512 key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
513 key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
514 if (op_type == CHCR_DECRYPT_OP) {
515 if (generate_copy_rrkey(ablkctx, key_ctx))
516 goto map_fail1;
517 } else {
518 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
519 memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
520 } else {
521 memcpy(key_ctx->key, ablkctx->key +
522 (ablkctx->enckey_len >> 1),
523 ablkctx->enckey_len >> 1);
524 memcpy(key_ctx->key +
525 (ablkctx->enckey_len >> 1),
526 ablkctx->key,
527 ablkctx->enckey_len >> 1);
528 }
529 }
530 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
531
532 memcpy(ablkctx->iv, req->info, ivsize);
533 sg_init_table(&ablkctx->iv_sg, 1);
534 sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
535 sg_param.nents = ablkctx->dst_nents;
536 sg_param.obsize = dst_bufsize;
537 sg_param.qid = qid;
538 sg_param.align = 1;
539 if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
540 &sg_param))
541 goto map_fail1;
542
543 skb_set_transport_header(skb, transhdr_len);
544 write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
545 write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
546 create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
547 req_ctx->skb = skb;
548 skb_get(skb);
549 return skb;
550map_fail1:
551 kfree_skb(skb);
552 return ERR_PTR(-ENOMEM);
553}
554
555static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
556 unsigned int keylen)
557{
558 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
559 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
560 struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
561 unsigned int ck_size, context_size;
562 u16 alignment = 0;
563
564 if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
565 goto badkey_err;
566
567 memcpy(ablkctx->key, key, keylen);
568 ablkctx->enckey_len = keylen;
569 if (keylen == AES_KEYSIZE_128) {
570 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
571 } else if (keylen == AES_KEYSIZE_192) {
572 alignment = 8;
573 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
574 } else if (keylen == AES_KEYSIZE_256) {
575 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
576 } else {
577 goto badkey_err;
578 }
579
580 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
581 keylen + alignment) >> 4;
582
583 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
584 0, 0, context_size);
585 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
586 return 0;
587badkey_err:
588 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
589 ablkctx->enckey_len = 0;
590 return -EINVAL;
591}
592
593int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
594{
595 int ret = 0;
596 struct sge_ofld_txq *q;
597 struct adapter *adap = netdev2adap(dev);
598
599 local_bh_disable();
600 q = &adap->sge.ofldtxq[idx];
601 spin_lock(&q->sendq.lock);
602 if (q->full)
603 ret = -1;
604 spin_unlock(&q->sendq.lock);
605 local_bh_enable();
606 return ret;
607}
608
609static int chcr_aes_encrypt(struct ablkcipher_request *req)
610{
611 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
612 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
613 struct crypto_async_request *req_base = &req->base;
614 struct uld_ctx *u_ctx = ULD_CTX(ctx);
615 struct sk_buff *skb;
616
617 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
618 ctx->tx_channel_id))) {
619 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
620 return -EBUSY;
621 }
622
623 skb = create_cipher_wr(req_base, ctx,
624 u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
625 CHCR_ENCRYPT_OP);
626 if (IS_ERR(skb)) {
627 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
628 return PTR_ERR(skb);
629 }
630 skb->dev = u_ctx->lldi.ports[0];
631 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
632 chcr_send_wr(skb);
633 return -EINPROGRESS;
634}
635
636static int chcr_aes_decrypt(struct ablkcipher_request *req)
637{
638 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
639 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
640 struct crypto_async_request *req_base = &req->base;
641 struct uld_ctx *u_ctx = ULD_CTX(ctx);
642 struct sk_buff *skb;
643
644 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
645 ctx->tx_channel_id))) {
646 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
647 return -EBUSY;
648 }
649
650 skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
651 CHCR_DECRYPT_OP);
652 if (IS_ERR(skb)) {
653 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
654 return PTR_ERR(skb);
655 }
656 skb->dev = u_ctx->lldi.ports[0];
657 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
658 chcr_send_wr(skb);
659 return -EINPROGRESS;
660}
661
662static int chcr_device_init(struct chcr_context *ctx)
663{
664 struct uld_ctx *u_ctx;
665 unsigned int id;
666 int err = 0, rxq_perchan, rxq_idx;
667
668 id = smp_processor_id();
669 if (!ctx->dev) {
670 err = assign_chcr_device(&ctx->dev);
671 if (err) {
672 pr_err("chcr device assignment fails\n");
673 goto out;
674 }
675 u_ctx = ULD_CTX(ctx);
676 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
677 ctx->dev->tx_channel_id = 0;
678 rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
679 rxq_idx += id % rxq_perchan;
680 spin_lock(&ctx->dev->lock_chcr_dev);
681 ctx->tx_channel_id = rxq_idx;
682 spin_unlock(&ctx->dev->lock_chcr_dev);
683 }
684out:
685 return err;
686}
687
688static int chcr_cra_init(struct crypto_tfm *tfm)
689{
690 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
691 return chcr_device_init(crypto_tfm_ctx(tfm));
692}
693
694static int get_alg_config(struct algo_param *params,
695 unsigned int auth_size)
696{
697 switch (auth_size) {
698 case SHA1_DIGEST_SIZE:
699 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
700 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
701 params->result_size = SHA1_DIGEST_SIZE;
702 break;
703 case SHA224_DIGEST_SIZE:
704 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
705 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
706 params->result_size = SHA256_DIGEST_SIZE;
707 break;
708 case SHA256_DIGEST_SIZE:
709 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
710 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
711 params->result_size = SHA256_DIGEST_SIZE;
712 break;
713 case SHA384_DIGEST_SIZE:
714 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
715 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
716 params->result_size = SHA512_DIGEST_SIZE;
717 break;
718 case SHA512_DIGEST_SIZE:
719 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
720 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
721 params->result_size = SHA512_DIGEST_SIZE;
722 break;
723 default:
724 pr_err("chcr : ERROR, unsupported digest size\n");
725 return -EINVAL;
726 }
727 return 0;
728}
729
730static inline int
731write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
732 struct sk_buff *skb, unsigned int *frags, char *bfr,
733 u8 bfr_len)
734{
735 void *page_ptr = NULL;
736
737 skb->len += bfr_len;
738 skb->data_len += bfr_len;
739 skb->truesize += bfr_len;
740 page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
741 if (!page_ptr)
742 return -ENOMEM;
743 get_page(virt_to_page(page_ptr));
744 req_ctx->dummy_payload_ptr = page_ptr;
745 memcpy(page_ptr, bfr, bfr_len);
746 skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
747 offset_in_page(page_ptr), bfr_len);
748 (*frags)++;
749 return 0;
750}
751
752/**
753 * create_final_hash_wr - Create hash work request
754 * @req - ahash request
755 */
756static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
757 struct hash_wr_param *param)
758{
759 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
760 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
761 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
762 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
763 struct sk_buff *skb = NULL;
764 struct _key_ctx *key_ctx;
765 struct fw_crypto_lookaside_wr *wreq;
766 struct cpl_tx_sec_pdu *sec_cpl;
767 unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
768 unsigned int digestsize = crypto_ahash_digestsize(tfm);
769 unsigned int kctx_len = sizeof(*key_ctx);
770 u8 hash_size_in_response = 0;
771
772 iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
773 kctx_len += param->alg_prm.result_size + iopad_alignment;
774 if (param->opad_needed)
775 kctx_len += param->alg_prm.result_size + iopad_alignment;
776
777 if (req_ctx->result)
778 hash_size_in_response = digestsize;
779 else
780 hash_size_in_response = param->alg_prm.result_size;
781 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
782 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
783 GFP_ATOMIC);
784 if (!skb)
785 return skb;
786
787 skb_reserve(skb, sizeof(struct sge_opaque_hdr));
788 wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
789 memset(wreq, 0, transhdr_len);
790
791 sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
792 sec_cpl->op_ivinsrtofst =
793 FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
794 sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
795
796 sec_cpl->aadstart_cipherstop_hi =
797 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
798 sec_cpl->cipherstop_lo_authinsert =
799 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
800 sec_cpl->seqno_numivs =
801 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
802 param->opad_needed, 0, 0);
803
804 sec_cpl->ivgen_hdrlen =
805 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
806
807 key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
808 memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
809
810 if (param->opad_needed)
811 memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
812 CHCR_HASH_MAX_DIGEST_SIZE),
813 hmacctx->opad, param->alg_prm.result_size);
814
815 key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
816 param->alg_prm.mk_size, 0,
817 param->opad_needed,
818 (kctx_len >> 4));
819 sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
820
821 skb_set_transport_header(skb, transhdr_len);
822 if (param->bfr_len != 0)
823 write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
824 param->bfr_len);
825 if (param->sg_len != 0)
826 write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
827
828 create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
829 0);
830 req_ctx->skb = skb;
831 skb_get(skb);
832 return skb;
833}
834
835static int chcr_ahash_update(struct ahash_request *req)
836{
837 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
838 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
839 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
840 struct uld_ctx *u_ctx = NULL;
841 struct sk_buff *skb;
842 u8 remainder = 0, bs;
843 unsigned int nbytes = req->nbytes;
844 struct hash_wr_param params;
845
846 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
847
848 u_ctx = ULD_CTX(ctx);
849 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
850 ctx->tx_channel_id))) {
851 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
852 return -EBUSY;
853 }
854
855 if (nbytes + req_ctx->bfr_len >= bs) {
856 remainder = (nbytes + req_ctx->bfr_len) % bs;
857 nbytes = nbytes + req_ctx->bfr_len - remainder;
858 } else {
859 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
860 req_ctx->bfr_len, nbytes, 0);
861 req_ctx->bfr_len += nbytes;
862 return 0;
863 }
864
865 params.opad_needed = 0;
866 params.more = 1;
867 params.last = 0;
868 params.sg_len = nbytes - req_ctx->bfr_len;
869 params.bfr_len = req_ctx->bfr_len;
870 params.scmd1 = 0;
871 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
872 req_ctx->result = 0;
873 req_ctx->data_len += params.sg_len + params.bfr_len;
874 skb = create_final_hash_wr(req, &params);
875 if (!skb)
876 return -ENOMEM;
877
878 req_ctx->bfr_len = remainder;
879 if (remainder)
880 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
881 req_ctx->bfr, remainder, req->nbytes -
882 remainder);
883 skb->dev = u_ctx->lldi.ports[0];
884 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
885 chcr_send_wr(skb);
886
887 return -EINPROGRESS;
888}
889
890static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
891{
892 memset(bfr_ptr, 0, bs);
893 *bfr_ptr = 0x80;
894 if (bs == 64)
895 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
896 else
897 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
898}
899
900static int chcr_ahash_final(struct ahash_request *req)
901{
902 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
903 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
904 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
905 struct hash_wr_param params;
906 struct sk_buff *skb;
907 struct uld_ctx *u_ctx = NULL;
908 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
909
910 u_ctx = ULD_CTX(ctx);
911 if (is_hmac(crypto_ahash_tfm(rtfm)))
912 params.opad_needed = 1;
913 else
914 params.opad_needed = 0;
915 params.sg_len = 0;
916 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
917 req_ctx->result = 1;
918 params.bfr_len = req_ctx->bfr_len;
919 req_ctx->data_len += params.bfr_len + params.sg_len;
920 if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
921 create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
922 params.last = 0;
923 params.more = 1;
924 params.scmd1 = 0;
925 params.bfr_len = bs;
926
927 } else {
928 params.scmd1 = req_ctx->data_len;
929 params.last = 1;
930 params.more = 0;
931 }
932 skb = create_final_hash_wr(req, &params);
933 skb->dev = u_ctx->lldi.ports[0];
934 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
935 chcr_send_wr(skb);
936 return -EINPROGRESS;
937}
938
939static int chcr_ahash_finup(struct ahash_request *req)
940{
941 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
942 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
943 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
944 struct uld_ctx *u_ctx = NULL;
945 struct sk_buff *skb;
946 struct hash_wr_param params;
947 u8 bs;
948
949 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
950 u_ctx = ULD_CTX(ctx);
951
952 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
953 ctx->tx_channel_id))) {
954 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
955 return -EBUSY;
956 }
957
958 if (is_hmac(crypto_ahash_tfm(rtfm)))
959 params.opad_needed = 1;
960 else
961 params.opad_needed = 0;
962
963 params.sg_len = req->nbytes;
964 params.bfr_len = req_ctx->bfr_len;
965 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
966 req_ctx->data_len += params.bfr_len + params.sg_len;
967 req_ctx->result = 1;
968 if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
969 create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
970 params.last = 0;
971 params.more = 1;
972 params.scmd1 = 0;
973 params.bfr_len = bs;
974 } else {
975 params.scmd1 = req_ctx->data_len;
976 params.last = 1;
977 params.more = 0;
978 }
979
980 skb = create_final_hash_wr(req, &params);
981 if (!skb)
982 return -ENOMEM;
983 skb->dev = u_ctx->lldi.ports[0];
984 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
985 chcr_send_wr(skb);
986
987 return -EINPROGRESS;
988}
989
990static int chcr_ahash_digest(struct ahash_request *req)
991{
992 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
993 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
994 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
995 struct uld_ctx *u_ctx = NULL;
996 struct sk_buff *skb;
997 struct hash_wr_param params;
998 u8 bs;
999
1000 rtfm->init(req);
1001 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1002
1003 u_ctx = ULD_CTX(ctx);
1004 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1005 ctx->tx_channel_id))) {
1006 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1007 return -EBUSY;
1008 }
1009
1010 if (is_hmac(crypto_ahash_tfm(rtfm)))
1011 params.opad_needed = 1;
1012 else
1013 params.opad_needed = 0;
1014
1015 params.last = 0;
1016 params.more = 0;
1017 params.sg_len = req->nbytes;
1018 params.bfr_len = 0;
1019 params.scmd1 = 0;
1020 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1021 req_ctx->result = 1;
1022 req_ctx->data_len += params.bfr_len + params.sg_len;
1023
1024 if (req_ctx->bfr && req->nbytes == 0) {
1025 create_last_hash_block(req_ctx->bfr, bs, 0);
1026 params.more = 1;
1027 params.bfr_len = bs;
1028 }
1029
1030 skb = create_final_hash_wr(req, &params);
1031 if (!skb)
1032 return -ENOMEM;
1033
1034 skb->dev = u_ctx->lldi.ports[0];
1035 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1036 chcr_send_wr(skb);
1037 return -EINPROGRESS;
1038}
1039
1040static int chcr_ahash_export(struct ahash_request *areq, void *out)
1041{
1042 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1043 struct chcr_ahash_req_ctx *state = out;
1044
1045 state->bfr_len = req_ctx->bfr_len;
1046 state->data_len = req_ctx->data_len;
1047 memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
1048 memcpy(state->partial_hash, req_ctx->partial_hash,
1049 CHCR_HASH_MAX_DIGEST_SIZE);
1050 return 0;
1051}
1052
1053static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1054{
1055 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1056 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1057
1058 req_ctx->bfr_len = state->bfr_len;
1059 req_ctx->data_len = state->data_len;
1060 req_ctx->dummy_payload_ptr = NULL;
1061 memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
1062 memcpy(req_ctx->partial_hash, state->partial_hash,
1063 CHCR_HASH_MAX_DIGEST_SIZE);
1064 return 0;
1065}
1066
1067static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1068 unsigned int keylen)
1069{
1070 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1071 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1072 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1073 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1074 unsigned int i, err = 0, updated_digestsize;
1075
1076 /*
1077 * use the key to calculate the ipad and opad. ipad will be sent with the
1078 * first request's data. opad will be sent with the final hash result.
1079 * ipad is stored in hmacctx->ipad and opad in hmacctx->opad.
1080 */
1081 if (!hmacctx->desc)
1082 return -EINVAL;
1083 if (keylen > bs) {
1084 err = crypto_shash_digest(hmacctx->desc, key, keylen,
1085 hmacctx->ipad);
1086 if (err)
1087 goto out;
1088 keylen = digestsize;
1089 } else {
1090 memcpy(hmacctx->ipad, key, keylen);
1091 }
1092 memset(hmacctx->ipad + keylen, 0, bs - keylen);
1093 memcpy(hmacctx->opad, hmacctx->ipad, bs);
1094
1095 for (i = 0; i < bs / sizeof(int); i++) {
1096 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1097 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1098 }
1099
1100 updated_digestsize = digestsize;
1101 if (digestsize == SHA224_DIGEST_SIZE)
1102 updated_digestsize = SHA256_DIGEST_SIZE;
1103 else if (digestsize == SHA384_DIGEST_SIZE)
1104 updated_digestsize = SHA512_DIGEST_SIZE;
1105 err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
1106 hmacctx->ipad, digestsize);
1107 if (err)
1108 goto out;
1109 chcr_change_order(hmacctx->ipad, updated_digestsize);
1110
1111 err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
1112 hmacctx->opad, digestsize);
1113 if (err)
1114 goto out;
1115 chcr_change_order(hmacctx->opad, updated_digestsize);
1116out:
1117 return err;
1118}
1119
1120static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1121 unsigned int key_len)
1122{
1123 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1124 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1125 int status = 0;
1126 unsigned short context_size = 0;
1127
1128 if ((key_len == (AES_KEYSIZE_128 << 1)) ||
1129 (key_len == (AES_KEYSIZE_256 << 1))) {
1130 memcpy(ablkctx->key, key, key_len);
1131 ablkctx->enckey_len = key_len;
1132 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1133 ablkctx->key_ctx_hdr =
1134 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1135 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1136 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1137 CHCR_KEYCTX_NO_KEY, 1,
1138 0, context_size);
1139 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1140 } else {
1141 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
1142 CRYPTO_TFM_RES_BAD_KEY_LEN);
1143 ablkctx->enckey_len = 0;
1144 status = -EINVAL;
1145 }
1146 return status;
1147}
1148
1149static int chcr_sha_init(struct ahash_request *areq)
1150{
1151 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1152 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1153 int digestsize = crypto_ahash_digestsize(tfm);
1154
1155 req_ctx->data_len = 0;
1156 req_ctx->dummy_payload_ptr = NULL;
1157 req_ctx->bfr_len = 0;
1158 req_ctx->skb = NULL;
1159 req_ctx->result = 0;
1160 copy_hash_init_values(req_ctx->partial_hash, digestsize);
1161 return 0;
1162}
1163
1164static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1165{
1166 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1167 sizeof(struct chcr_ahash_req_ctx));
1168 return chcr_device_init(crypto_tfm_ctx(tfm));
1169}
1170
1171static int chcr_hmac_init(struct ahash_request *areq)
1172{
1173 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1174 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1175 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1176 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1177 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1178 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1179
1180 chcr_sha_init(areq);
1181 req_ctx->data_len = bs;
1182 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1183 if (digestsize == SHA224_DIGEST_SIZE)
1184 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1185 SHA256_DIGEST_SIZE);
1186 else if (digestsize == SHA384_DIGEST_SIZE)
1187 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1188 SHA512_DIGEST_SIZE);
1189 else
1190 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1191 digestsize);
1192 }
1193 return 0;
1194}
1195
1196static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1197{
1198 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1199 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1200 unsigned int digestsize =
1201 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1202
1203 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1204 sizeof(struct chcr_ahash_req_ctx));
1205 hmacctx->desc = chcr_alloc_shash(digestsize);
1206 if (IS_ERR(hmacctx->desc))
1207 return PTR_ERR(hmacctx->desc);
1208 return chcr_device_init(crypto_tfm_ctx(tfm));
1209}
1210
1211static void chcr_free_shash(struct shash_desc *desc)
1212{
1213 crypto_free_shash(desc->tfm);
1214 kfree(desc);
1215}
1216
1217static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1218{
1219 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1220 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1221
1222 if (hmacctx->desc) {
1223 chcr_free_shash(hmacctx->desc);
1224 hmacctx->desc = NULL;
1225 }
1226}
1227
1228static struct chcr_alg_template driver_algs[] = {
1229 /* AES-CBC */
1230 {
1231 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1232 .is_registered = 0,
1233 .alg.crypto = {
1234 .cra_name = "cbc(aes)",
1235 .cra_driver_name = "cbc(aes-chcr)",
1236 .cra_priority = CHCR_CRA_PRIORITY,
1237 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
1238 CRYPTO_ALG_ASYNC,
1239 .cra_blocksize = AES_BLOCK_SIZE,
1240 .cra_ctxsize = sizeof(struct chcr_context)
1241 + sizeof(struct ablk_ctx),
1242 .cra_alignmask = 0,
1243 .cra_type = &crypto_ablkcipher_type,
1244 .cra_module = THIS_MODULE,
1245 .cra_init = chcr_cra_init,
1246 .cra_exit = NULL,
1247 .cra_u.ablkcipher = {
1248 .min_keysize = AES_MIN_KEY_SIZE,
1249 .max_keysize = AES_MAX_KEY_SIZE,
1250 .ivsize = AES_BLOCK_SIZE,
1251 .setkey = chcr_aes_cbc_setkey,
1252 .encrypt = chcr_aes_encrypt,
1253 .decrypt = chcr_aes_decrypt,
1254 }
1255 }
1256 },
1257 {
1258 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1259 .is_registered = 0,
1260 .alg.crypto = {
1261 .cra_name = "xts(aes)",
1262 .cra_driver_name = "xts(aes-chcr)",
1263 .cra_priority = CHCR_CRA_PRIORITY,
1264 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
1265 CRYPTO_ALG_ASYNC,
1266 .cra_blocksize = AES_BLOCK_SIZE,
1267 .cra_ctxsize = sizeof(struct chcr_context) +
1268 sizeof(struct ablk_ctx),
1269 .cra_alignmask = 0,
1270 .cra_type = &crypto_ablkcipher_type,
1271 .cra_module = THIS_MODULE,
1272 .cra_init = chcr_cra_init,
1273 .cra_exit = NULL,
1274 .cra_u = {
1275 .ablkcipher = {
1276 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1277 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1278 .ivsize = AES_BLOCK_SIZE,
1279 .setkey = chcr_aes_xts_setkey,
1280 .encrypt = chcr_aes_encrypt,
1281 .decrypt = chcr_aes_decrypt,
1282 }
1283 }
1284 }
1285 },
1286 /* SHA */
1287 {
1288 .type = CRYPTO_ALG_TYPE_AHASH,
1289 .is_registered = 0,
1290 .alg.hash = {
1291 .halg.digestsize = SHA1_DIGEST_SIZE,
1292 .halg.base = {
1293 .cra_name = "sha1",
1294 .cra_driver_name = "sha1-chcr",
1295 .cra_blocksize = SHA1_BLOCK_SIZE,
1296 }
1297 }
1298 },
1299 {
1300 .type = CRYPTO_ALG_TYPE_AHASH,
1301 .is_registered = 0,
1302 .alg.hash = {
1303 .halg.digestsize = SHA256_DIGEST_SIZE,
1304 .halg.base = {
1305 .cra_name = "sha256",
1306 .cra_driver_name = "sha256-chcr",
1307 .cra_blocksize = SHA256_BLOCK_SIZE,
1308 }
1309 }
1310 },
1311 {
1312 .type = CRYPTO_ALG_TYPE_AHASH,
1313 .is_registered = 0,
1314 .alg.hash = {
1315 .halg.digestsize = SHA224_DIGEST_SIZE,
1316 .halg.base = {
1317 .cra_name = "sha224",
1318 .cra_driver_name = "sha224-chcr",
1319 .cra_blocksize = SHA224_BLOCK_SIZE,
1320 }
1321 }
1322 },
1323 {
1324 .type = CRYPTO_ALG_TYPE_AHASH,
1325 .is_registered = 0,
1326 .alg.hash = {
1327 .halg.digestsize = SHA384_DIGEST_SIZE,
1328 .halg.base = {
1329 .cra_name = "sha384",
1330 .cra_driver_name = "sha384-chcr",
1331 .cra_blocksize = SHA384_BLOCK_SIZE,
1332 }
1333 }
1334 },
1335 {
1336 .type = CRYPTO_ALG_TYPE_AHASH,
1337 .is_registered = 0,
1338 .alg.hash = {
1339 .halg.digestsize = SHA512_DIGEST_SIZE,
1340 .halg.base = {
1341 .cra_name = "sha512",
1342 .cra_driver_name = "sha512-chcr",
1343 .cra_blocksize = SHA512_BLOCK_SIZE,
1344 }
1345 }
1346 },
1347 /* HMAC */
1348 {
1349 .type = CRYPTO_ALG_TYPE_HMAC,
1350 .is_registered = 0,
1351 .alg.hash = {
1352 .halg.digestsize = SHA1_DIGEST_SIZE,
1353 .halg.base = {
1354 .cra_name = "hmac(sha1)",
1355 .cra_driver_name = "hmac(sha1-chcr)",
1356 .cra_blocksize = SHA1_BLOCK_SIZE,
1357 }
1358 }
1359 },
1360 {
1361 .type = CRYPTO_ALG_TYPE_HMAC,
1362 .is_registered = 0,
1363 .alg.hash = {
1364 .halg.digestsize = SHA224_DIGEST_SIZE,
1365 .halg.base = {
1366 .cra_name = "hmac(sha224)",
1367 .cra_driver_name = "hmac(sha224-chcr)",
1368 .cra_blocksize = SHA224_BLOCK_SIZE,
1369 }
1370 }
1371 },
1372 {
1373 .type = CRYPTO_ALG_TYPE_HMAC,
1374 .is_registered = 0,
1375 .alg.hash = {
1376 .halg.digestsize = SHA256_DIGEST_SIZE,
1377 .halg.base = {
1378 .cra_name = "hmac(sha256)",
1379 .cra_driver_name = "hmac(sha256-chcr)",
1380 .cra_blocksize = SHA256_BLOCK_SIZE,
1381 }
1382 }
1383 },
1384 {
1385 .type = CRYPTO_ALG_TYPE_HMAC,
1386 .is_registered = 0,
1387 .alg.hash = {
1388 .halg.digestsize = SHA384_DIGEST_SIZE,
1389 .halg.base = {
1390 .cra_name = "hmac(sha384)",
1391 .cra_driver_name = "hmac(sha384-chcr)",
1392 .cra_blocksize = SHA384_BLOCK_SIZE,
1393 }
1394 }
1395 },
1396 {
1397 .type = CRYPTO_ALG_TYPE_HMAC,
1398 .is_registered = 0,
1399 .alg.hash = {
1400 .halg.digestsize = SHA512_DIGEST_SIZE,
1401 .halg.base = {
1402 .cra_name = "hmac(sha512)",
1403 .cra_driver_name = "hmac(sha512-chcr)",
1404 .cra_blocksize = SHA512_BLOCK_SIZE,
1405 }
1406 }
1407 },
1408};
1409
1410/*
1411 * chcr_unregister_alg - Deregister crypto algorithms with the
1412 * kernel framework.
1413 */
1414static int chcr_unregister_alg(void)
1415{
1416 int i;
1417
1418 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1419 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
1420 case CRYPTO_ALG_TYPE_ABLKCIPHER:
1421 if (driver_algs[i].is_registered)
1422 crypto_unregister_alg(
1423 &driver_algs[i].alg.crypto);
1424 break;
1425 case CRYPTO_ALG_TYPE_AHASH:
1426 if (driver_algs[i].is_registered)
1427 crypto_unregister_ahash(
1428 &driver_algs[i].alg.hash);
1429 break;
1430 }
1431 driver_algs[i].is_registered = 0;
1432 }
1433 return 0;
1434}
1435
1436#define SZ_AHASH_CTX sizeof(struct chcr_context)
1437#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
1438#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
1439#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
1440
1441/*
1442 * chcr_register_alg - Register crypto algorithms with kernel framework.
1443 */
1444static int chcr_register_alg(void)
1445{
1446 struct crypto_alg ai;
1447 struct ahash_alg *a_hash;
1448 int err = 0, i;
1449 char *name = NULL;
1450
1451 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1452 if (driver_algs[i].is_registered)
1453 continue;
1454 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
1455 case CRYPTO_ALG_TYPE_ABLKCIPHER:
1456 err = crypto_register_alg(&driver_algs[i].alg.crypto);
1457 name = driver_algs[i].alg.crypto.cra_driver_name;
1458 break;
1459 case CRYPTO_ALG_TYPE_AHASH:
1460 a_hash = &driver_algs[i].alg.hash;
1461 a_hash->update = chcr_ahash_update;
1462 a_hash->final = chcr_ahash_final;
1463 a_hash->finup = chcr_ahash_finup;
1464 a_hash->digest = chcr_ahash_digest;
1465 a_hash->export = chcr_ahash_export;
1466 a_hash->import = chcr_ahash_import;
1467 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
1468 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
1469 a_hash->halg.base.cra_module = THIS_MODULE;
1470 a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
1471 a_hash->halg.base.cra_alignmask = 0;
1472 a_hash->halg.base.cra_exit = NULL;
1473 a_hash->halg.base.cra_type = &crypto_ahash_type;
1474
1475 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
1476 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
1477 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
1478 a_hash->init = chcr_hmac_init;
1479 a_hash->setkey = chcr_ahash_setkey;
1480 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
1481 } else {
1482 a_hash->init = chcr_sha_init;
1483 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
1484 a_hash->halg.base.cra_init = chcr_sha_cra_init;
1485 }
1486 err = crypto_register_ahash(&driver_algs[i].alg.hash);
1487 ai = driver_algs[i].alg.hash.halg.base;
1488 name = ai.cra_driver_name;
1489 break;
1490 }
1491 if (err) {
1492 pr_err("chcr : %s : Algorithm registration failed\n",
1493 name);
1494 goto register_err;
1495 } else {
1496 driver_algs[i].is_registered = 1;
1497 }
1498 }
1499 return 0;
1500
1501register_err:
1502 chcr_unregister_alg();
1503 return err;
1504}
1505
1506/*
1507 * start_crypto - Register the crypto algorithms.
1508 * This should be called once when the first device comes up. After this,
1509 * the kernel will start calling driver APIs for crypto operations.
1510 */
1511int start_crypto(void)
1512{
1513 return chcr_register_alg();
1514}
1515
1516/*
1517 * stop_crypto - Deregister all the crypto algorithms with the kernel.
1518 * This should be called once when the last device goes down. After this,
1519 * the kernel will not call the driver API for crypto operations.
1520 */
1521int stop_crypto(void)
1522{
1523 chcr_unregister_alg();
1524 return 0;
1525}
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
new file mode 100644
index 000000000000..ec64fbcdeb49
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -0,0 +1,471 @@
1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 */
35
36#ifndef __CHCR_ALGO_H__
37#define __CHCR_ALGO_H__
38
39/* Crypto key context */
40#define KEY_CONTEXT_CTX_LEN_S 24
41#define KEY_CONTEXT_CTX_LEN_M 0xff
42#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S)
43#define KEY_CONTEXT_CTX_LEN_G(x) \
44 (((x) >> KEY_CONTEXT_CTX_LEN_S) & KEY_CONTEXT_CTX_LEN_M)
45
46#define KEY_CONTEXT_DUAL_CK_S 12
47#define KEY_CONTEXT_DUAL_CK_M 0x1
48#define KEY_CONTEXT_DUAL_CK_V(x) ((x) << KEY_CONTEXT_DUAL_CK_S)
49#define KEY_CONTEXT_DUAL_CK_G(x) \
50(((x) >> KEY_CONTEXT_DUAL_CK_S) & KEY_CONTEXT_DUAL_CK_M)
51#define KEY_CONTEXT_DUAL_CK_F KEY_CONTEXT_DUAL_CK_V(1U)
52
53#define KEY_CONTEXT_SALT_PRESENT_S 10
54#define KEY_CONTEXT_SALT_PRESENT_M 0x1
55#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S)
56#define KEY_CONTEXT_SALT_PRESENT_G(x) \
57 (((x) >> KEY_CONTEXT_SALT_PRESENT_S) & \
58 KEY_CONTEXT_SALT_PRESENT_M)
59#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U)
60
61#define KEY_CONTEXT_VALID_S 0
62#define KEY_CONTEXT_VALID_M 0x1
63#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S)
64#define KEY_CONTEXT_VALID_G(x) \
65 (((x) >> KEY_CONTEXT_VALID_S) & \
66 KEY_CONTEXT_VALID_M)
67#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U)
68
69#define KEY_CONTEXT_CK_SIZE_S 6
70#define KEY_CONTEXT_CK_SIZE_M 0xf
71#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S)
72#define KEY_CONTEXT_CK_SIZE_G(x) \
73 (((x) >> KEY_CONTEXT_CK_SIZE_S) & KEY_CONTEXT_CK_SIZE_M)
74
75#define KEY_CONTEXT_MK_SIZE_S 2
76#define KEY_CONTEXT_MK_SIZE_M 0xf
77#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S)
78#define KEY_CONTEXT_MK_SIZE_G(x) \
79 (((x) >> KEY_CONTEXT_MK_SIZE_S) & KEY_CONTEXT_MK_SIZE_M)
80
81#define KEY_CONTEXT_OPAD_PRESENT_S 11
82#define KEY_CONTEXT_OPAD_PRESENT_M 0x1
83#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
84#define KEY_CONTEXT_OPAD_PRESENT_G(x) \
85 (((x) >> KEY_CONTEXT_OPAD_PRESENT_S) & \
86 KEY_CONTEXT_OPAD_PRESENT_M)
87#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
88
89#define CHCR_HASH_MAX_DIGEST_SIZE 64
90#define CHCR_MAX_SHA_DIGEST_SIZE 64
91
92#define IPSEC_TRUNCATED_ICV_SIZE 12
93#define TLS_TRUNCATED_HMAC_SIZE 10
94#define CBCMAC_DIGEST_SIZE 16
95#define MAX_HASH_NAME 20
96
97#define SHA1_INIT_STATE_5X4B 5
98#define SHA256_INIT_STATE_8X4B 8
99#define SHA512_INIT_STATE_8X8B 8
100#define SHA1_INIT_STATE SHA1_INIT_STATE_5X4B
101#define SHA224_INIT_STATE SHA256_INIT_STATE_8X4B
102#define SHA256_INIT_STATE SHA256_INIT_STATE_8X4B
103#define SHA384_INIT_STATE SHA512_INIT_STATE_8X8B
104#define SHA512_INIT_STATE SHA512_INIT_STATE_8X8B
105
106#define DUMMY_BYTES 16
107
108#define IPAD_DATA 0x36363636
109#define OPAD_DATA 0x5c5c5c5c
110
111#define TRANSHDR_SIZE(alignedkctx_len)\
112 (sizeof(struct ulptx_idata) +\
113 sizeof(struct ulp_txpkt) +\
114 sizeof(struct fw_crypto_lookaside_wr) +\
115 sizeof(struct cpl_tx_sec_pdu) +\
116 (alignedkctx_len))
117#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
118 (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
119 sizeof(struct cpl_rx_phys_dsgl))
120#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
121 (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
122
123#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
124 sizeof(struct ulp_txpkt) + \
125 sizeof(struct ulptx_idata))
126
127#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
128 htonl( \
129 CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
130 CPL_TX_SEC_PDU_RXCHID_V((id)) | \
131 CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
132 CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
133 CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
134 CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
135 CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
136
137#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
138 htonl( \
139 CPL_TX_SEC_PDU_AADSTART_V((a_start)) | \
140 CPL_TX_SEC_PDU_AADSTOP_V((a_stop)) | \
141 CPL_TX_SEC_PDU_CIPHERSTART_V((c_start)) | \
142 CPL_TX_SEC_PDU_CIPHERSTOP_HI_V((c_stop_hi)))
143
144#define FILL_SEC_CPL_AUTHINSERT(c_stop_lo, a_start, a_stop, a_inst) \
145 htonl( \
146 CPL_TX_SEC_PDU_CIPHERSTOP_LO_V((c_stop_lo)) | \
147 CPL_TX_SEC_PDU_AUTHSTART_V((a_start)) | \
148 CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
149 CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
150
151#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
152 htonl( \
153 SCMD_SEQ_NO_CTRL_V(0) | \
154 SCMD_STATUS_PRESENT_V(0) | \
155 SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) | \
156 SCMD_ENC_DEC_CTRL_V((ctrl)) | \
157 SCMD_CIPH_AUTH_SEQ_CTRL_V((seq)) | \
158 SCMD_CIPH_MODE_V((cmode)) | \
159 SCMD_AUTH_MODE_V((amode)) | \
160 SCMD_HMAC_CTRL_V((opad)) | \
161 SCMD_IV_SIZE_V((size)) | \
162 SCMD_NUM_IVS_V((nivs)))
163
164#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
165 SCMD_ENB_DBGID_V(0) | \
166 SCMD_IV_GEN_CTRL_V(0) | \
167 SCMD_LAST_FRAG_V((last)) | \
168 SCMD_MORE_FRAGS_V((more)) | \
169 SCMD_TLS_COMPPDU_V(0) | \
170 SCMD_KEY_CTX_INLINE_V((ctx_in)) | \
171 SCMD_TLS_FRAG_ENABLE_V(0) | \
172 SCMD_MAC_ONLY_V((mac)) | \
173 SCMD_AADIVDROP_V((ivdrop)) | \
174 SCMD_HDR_LEN_V((len)))
175
176#define FILL_KEY_CTX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \
177 htonl(KEY_CONTEXT_VALID_V(1) | \
178 KEY_CONTEXT_CK_SIZE_V((ck_size)) | \
179 KEY_CONTEXT_MK_SIZE_V(mk_size) | \
180 KEY_CONTEXT_DUAL_CK_V((d_ck)) | \
181 KEY_CONTEXT_OPAD_PRESENT_V((opad)) | \
182 KEY_CONTEXT_SALT_PRESENT_V(1) | \
183 KEY_CONTEXT_CTX_LEN_V((ctx_len)))
184
185#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \
186 htonl( \
187 FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
188 FW_CRYPTO_LOOKASIDE_WR) | \
189 FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
190 FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \
191 FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
192 FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
193
194#define FILL_WR_RX_Q_ID(cid, qid, wr_iv) \
195 htonl( \
196 FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
197 FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
198 FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) | \
199 FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)))
200
201#define FILL_ULPTX_CMD_DEST(cid) \
202 htonl(ULPTX_CMD_V(ULP_TX_PKT) | \
203 ULP_TXPKT_DEST_V(0) | \
204 ULP_TXPKT_DATAMODIFY_V(0) | \
205 ULP_TXPKT_CHANNELID_V((cid)) | \
206 ULP_TXPKT_RO_V(1) | \
207 ULP_TXPKT_FID_V(0))
208
209#define KEYCTX_ALIGN_PAD(bs) ({unsigned int _bs = (bs);\
210 _bs == SHA1_DIGEST_SIZE ? 12 : 0; })
211
212#define FILL_PLD_SIZE_HASH_SIZE(payload_sgl_len, sgl_lengths, total_frags) \
213 htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(payload_sgl_len ? \
214 sgl_lengths[total_frags] : 0) |\
215 FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(0))
216
217#define FILL_LEN_PKD(calc_tx_flits_ofld, skb) \
218 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP((\
219 calc_tx_flits_ofld(skb) * 8), 16)))
220
221#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
222 ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1))
223
224#define MAX_NK 8
225#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
226
227struct algo_param {
228 unsigned int auth_mode;
229 unsigned int mk_size;
230 unsigned int result_size;
231};
232
233struct hash_wr_param {
234 unsigned int opad_needed;
235 unsigned int more;
236 unsigned int last;
237 struct algo_param alg_prm;
238 unsigned int sg_len;
239 unsigned int bfr_len;
240 u64 scmd1;
241};
242
243enum {
244 AES_KEYLENGTH_128BIT = 128,
245 AES_KEYLENGTH_192BIT = 192,
246 AES_KEYLENGTH_256BIT = 256
247};
248
249enum {
250 KEYLENGTH_3BYTES = 3,
251 KEYLENGTH_4BYTES = 4,
252 KEYLENGTH_6BYTES = 6,
253 KEYLENGTH_8BYTES = 8
254};
255
256enum {
257 NUMBER_OF_ROUNDS_10 = 10,
258 NUMBER_OF_ROUNDS_12 = 12,
259 NUMBER_OF_ROUNDS_14 = 14,
260};
261
262/*
263 * CCM allows ICV sizes of 4, 6, 8, 10, 12, 14, and 16 octets;
264 * the values below give the length of the integrity check value (ICV).
265 */
266enum {
267 AES_CCM_ICV_4 = 4,
268 AES_CCM_ICV_6 = 6,
269 AES_CCM_ICV_8 = 8,
270 AES_CCM_ICV_10 = 10,
271 AES_CCM_ICV_12 = 12,
272 AES_CCM_ICV_14 = 14,
273 AES_CCM_ICV_16 = 16
274};
275
276struct hash_op_params {
277 unsigned char mk_size;
278 unsigned char pad_align;
279 unsigned char auth_mode;
280 char hash_name[MAX_HASH_NAME];
281 unsigned short block_size;
282 unsigned short word_size;
283 unsigned short ipad_size;
284};
285
286struct phys_sge_pairs {
287 __be16 len[8];
288 __be64 addr[8];
289};
290
291struct phys_sge_parm {
292 unsigned int nents;
293 unsigned int obsize;
294 unsigned short qid;
295 unsigned char align;
296};
297
298struct crypto_result {
299 struct completion completion;
300 int err;
301};
302
303static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
304 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
305};
306
307static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
308 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
309 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
310};
311
312static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
313 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
314 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
315};
316
317static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = {
318 SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
319 SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
320};
321
322static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = {
323 SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
324 SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
325};
326
327static inline void copy_hash_init_values(char *key, int digestsize)
328{
329 u8 i;
330 __be32 *dkey = (__be32 *)key;
331 u64 *ldkey = (u64 *)key;
332 __be64 *sha384 = (__be64 *)sha384_init;
333 __be64 *sha512 = (__be64 *)sha512_init;
334
335 switch (digestsize) {
336 case SHA1_DIGEST_SIZE:
337 for (i = 0; i < SHA1_INIT_STATE; i++)
338 dkey[i] = cpu_to_be32(sha1_init[i]);
339 break;
340 case SHA224_DIGEST_SIZE:
341 for (i = 0; i < SHA224_INIT_STATE; i++)
342 dkey[i] = cpu_to_be32(sha224_init[i]);
343 break;
344 case SHA256_DIGEST_SIZE:
345 for (i = 0; i < SHA256_INIT_STATE; i++)
346 dkey[i] = cpu_to_be32(sha256_init[i]);
347 break;
348 case SHA384_DIGEST_SIZE:
349 for (i = 0; i < SHA384_INIT_STATE; i++)
350 ldkey[i] = be64_to_cpu(sha384[i]);
351 break;
352 case SHA512_DIGEST_SIZE:
353 for (i = 0; i < SHA512_INIT_STATE; i++)
354 ldkey[i] = be64_to_cpu(sha512[i]);
355 break;
356 }
357}
358
359static const u8 sgl_lengths[20] = {
360 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15
361};
362
363/* Number of len fields (8) * size of one len field (2 bytes) */
364#define PHYSDSGL_MAX_LEN_SIZE 16
365
366static inline u16 get_space_for_phys_dsgl(unsigned int sgl_entr)
367{
368 /* len field size + addr field size */
369 return ((sgl_entr >> 3) + ((sgl_entr % 8) ?
370 1 : 0)) * PHYSDSGL_MAX_LEN_SIZE +
371 (sgl_entr << 3) + ((sgl_entr % 2 ? 1 : 0) << 3);
372}
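As a reading aid (not part of the patch): the helper above appears to size the physical DSGL as one 16-byte block of length fields per group of up to eight SG entries, plus eight bytes per address, with the address count padded up to an even number. A minimal worked sketch, assuming the names defined above:

/* Worked example of get_space_for_phys_dsgl(); illustration only. */
static inline u16 example_phys_dsgl_space_for_three_entries(void)
{
	/* ceil(3 / 8) = 1 len block (16 B) + 3 * 8 B addrs + 8 B pad = 48 */
	return get_space_for_phys_dsgl(3);
}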
373
374/* The AES s-transform matrix (s-box). */
375static const u8 aes_sbox[256] = {
376 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215,
377 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175,
378 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165,
379 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7,
380 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 90,
381 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 32,
382 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170,
383 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81,
384 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243,
385 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100,
386 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184,
387 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 194,
388 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78,
389 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46, 28, 166,
390 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102,
391 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225, 248,
392 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
393 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84,
394 187, 22
395};
396
397static u32 aes_ks_subword(const u32 w)
398{
399 u8 bytes[4];
400
401 *(u32 *)(&bytes[0]) = w;
402 bytes[0] = aes_sbox[bytes[0]];
403 bytes[1] = aes_sbox[bytes[1]];
404 bytes[2] = aes_sbox[bytes[2]];
405 bytes[3] = aes_sbox[bytes[3]];
406 return *(u32 *)(&bytes[0]);
407}
408
409static const u32 round_constant[11] = {
410 0x01000000, 0x02000000, 0x04000000, 0x08000000,
411 0x10000000, 0x20000000, 0x40000000, 0x80000000,
412 0x1B000000, 0x36000000, 0x6C000000
413};
414
415/* dec_key - OUTPUT - Reverse round key
416 * key - INPUT - key
417 * keylength - INPUT - length of the key in number of bits
418 */
419static inline void get_aes_decrypt_key(unsigned char *dec_key,
420 const unsigned char *key,
421 unsigned int keylength)
422{
423 u32 temp;
424 u32 w_ring[MAX_NK];
425 int i, j, k = 0;
426 u8 nr, nk;
427
428 switch (keylength) {
429 case AES_KEYLENGTH_128BIT:
430 nk = KEYLENGTH_4BYTES;
431 nr = NUMBER_OF_ROUNDS_10;
432 break;
433
434 case AES_KEYLENGTH_192BIT:
435 nk = KEYLENGTH_6BYTES;
436 nr = NUMBER_OF_ROUNDS_12;
437 break;
438 case AES_KEYLENGTH_256BIT:
439 nk = KEYLENGTH_8BYTES;
440 nr = NUMBER_OF_ROUNDS_14;
441 break;
442 default:
443 return;
444 }
445	for (i = 0; i < nk; i++)
446 w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
447
448 i = 0;
449 temp = w_ring[nk - 1];
450	while (i + nk < (nr + 1) * 4) {
451		if (!(i % nk)) {
452			/* RotWord(temp) */
453			temp = (temp << 8) | (temp >> 24);
454			temp = aes_ks_subword(temp);
455			temp ^= round_constant[i / nk];
456		} else if (nk == 8 && (i % 4 == 0)) {
457			temp = aes_ks_subword(temp);
458		}
459 w_ring[i % nk] ^= temp;
460 temp = w_ring[i % nk];
461 i++;
462 }
463 for (k = 0, j = i % nk; k < nk; k++) {
464 *((u32 *)dec_key + k) = htonl(w_ring[j]);
465 j--;
466		if (j < 0)
467 j += nk;
468 }
469}
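A hypothetical usage sketch (not taken from the patch): a caller wanting the reverse round key for hardware decryption passes the key length in bits. For a 128-bit key this could look like:

#include <crypto/aes.h>

/* Illustrative only: derive the reverse round key for a 128-bit AES key. */
static void example_prepare_aes128_decrypt_key(u8 *dec_key, const u8 *key)
{
	/* keylength is expressed in bits, so 16 bytes -> 128 */
	get_aes_decrypt_key(dec_key, key, AES_KEYSIZE_128 * 8);
}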
470
471#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
new file mode 100644
index 000000000000..2f6156b672ce
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -0,0 +1,240 @@
1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written and Maintained by:
11 * Manoj Malviya (manojmalviya@chelsio.com)
12 * Atul Gupta (atul.gupta@chelsio.com)
13 * Jitendra Lulla (jlulla@chelsio.com)
14 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
15 * Harsh Jain (harsh@chelsio.com)
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/skbuff.h>
21
22#include <crypto/aes.h>
23#include <crypto/hash.h>
24
25#include "t4_msg.h"
26#include "chcr_core.h"
27#include "cxgb4_uld.h"
28
29static LIST_HEAD(uld_ctx_list);
30static DEFINE_MUTEX(dev_mutex);
31static atomic_t dev_count;
32
33typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
34static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
35static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
36static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
37
38static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
39 [CPL_FW6_PLD] = cpl_fw6_pld_handler,
40};
41
42static struct cxgb4_pci_uld_info chcr_uld_info = {
43 .name = DRV_MODULE_NAME,
44 .nrxq = 4,
45 .rxq_size = 1024,
46 .nciq = 0,
47 .ciq_size = 0,
48 .add = chcr_uld_add,
49 .state_change = chcr_uld_state_change,
50 .rx_handler = chcr_uld_rx_handler,
51};
52
53int assign_chcr_device(struct chcr_dev **dev)
54{
55 struct uld_ctx *u_ctx;
56
57	/*
58	 * TODO: decide which device to use when several are available, perhaps
59	 * round-robin (see the sketch after this function). Requests from one
60	 * session must go to the same device to preserve ordering.
61	 */
62	mutex_lock(&dev_mutex); /* TODO ? */
63	if (list_empty(&uld_ctx_list)) {
64		mutex_unlock(&dev_mutex);
65		return -ENXIO;
66	}
67	u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
68
69 *dev = u_ctx->dev;
70 mutex_unlock(&dev_mutex);
71 return 0;
72}
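A minimal sketch of the round-robin selection the TODO above hints at, reusing the same dev_mutex and uld_ctx_list globals; this is an illustration of one possible policy, not the driver's implementation:

/* Illustrative round-robin variant of assign_chcr_device(). */
static int example_assign_device_round_robin(struct chcr_dev **dev)
{
	struct uld_ctx *u_ctx;

	mutex_lock(&dev_mutex);
	if (list_empty(&uld_ctx_list)) {
		mutex_unlock(&dev_mutex);
		return -ENXIO;
	}
	u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
	/* rotate the chosen context to the tail so the next caller gets another */
	list_move_tail(&u_ctx->entry, &uld_ctx_list);
	*dev = u_ctx->dev;
	mutex_unlock(&dev_mutex);
	return 0;
}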
73
74static int chcr_dev_add(struct uld_ctx *u_ctx)
75{
76 struct chcr_dev *dev;
77
78 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
79 if (!dev)
80		return -ENOMEM;
81
82 spin_lock_init(&dev->lock_chcr_dev);
83 u_ctx->dev = dev;
84 dev->u_ctx = u_ctx;
85 atomic_inc(&dev_count);
86 return 0;
87}
88
89static int chcr_dev_remove(struct uld_ctx *u_ctx)
90{
91 kfree(u_ctx->dev);
92 u_ctx->dev = NULL;
93 atomic_dec(&dev_count);
94 return 0;
95}
96
97static int cpl_fw6_pld_handler(struct chcr_dev *dev,
98 unsigned char *input)
99{
100 struct crypto_async_request *req;
101 struct cpl_fw6_pld *fw6_pld;
102 u32 ack_err_status = 0;
103 int error_status = 0;
104
105 fw6_pld = (struct cpl_fw6_pld *)input;
106 req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
107 fw6_pld->data[1]);
108
109 ack_err_status =
110 ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
111 if (ack_err_status) {
112 if (CHK_MAC_ERR_BIT(ack_err_status) ||
113 CHK_PAD_ERR_BIT(ack_err_status))
114 error_status = -EINVAL;
115 }
116	/* call the request's completion callback; error_status is 0 on success */
117 if (req) {
118 if (!chcr_handle_resp(req, input, error_status))
119 req->complete(req, error_status);
120 else
121 return -EINVAL;
122 } else {
123 pr_err("Incorrect request address from the firmware\n");
124 return -EFAULT;
125 }
126 return 0;
127}
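For context, a hedged sketch of the classic completion callback a requester could pair with struct crypto_result (defined in chcr_algo.h earlier in this patch); cpl_fw6_pld_handler() above invokes such a callback via req->complete():

/* Illustrative completion callback; not part of the patch. */
static void example_crypto_done(struct crypto_async_request *req, int err)
{
	struct crypto_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}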
128
129int chcr_send_wr(struct sk_buff *skb)
130{
131 return cxgb4_ofld_send(skb->dev, skb);
132}
133
134static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
135{
136 struct uld_ctx *u_ctx;
137
138 /* Create the device and add it in the device list */
139 u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
140 if (!u_ctx) {
141 u_ctx = ERR_PTR(-ENOMEM);
142 goto out;
143 }
144 u_ctx->lldi = *lld;
145 mutex_lock(&dev_mutex);
146 list_add_tail(&u_ctx->entry, &uld_ctx_list);
147 mutex_unlock(&dev_mutex);
148out:
149 return u_ctx;
150}
151
152int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
153 const struct pkt_gl *pgl)
154{
155 struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
156 struct chcr_dev *dev = u_ctx->dev;
157	const struct cpl_act_establish *rpl =
158				(const struct cpl_act_establish *)rsp;
159
160 if (rpl->ot.opcode != CPL_FW6_PLD) {
161 pr_err("Unsupported opcode\n");
162 return 0;
163 }
164
165 if (!pgl)
166 work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]);
167 else
168 work_handlers[rpl->ot.opcode](dev, pgl->va);
169 return 0;
170}
171
172static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
173{
174 struct uld_ctx *u_ctx = handle;
175 int ret = 0;
176
177 switch (state) {
178 case CXGB4_STATE_UP:
179 if (!u_ctx->dev) {
180 ret = chcr_dev_add(u_ctx);
181 if (ret != 0)
182 return ret;
183 }
184 if (atomic_read(&dev_count) == 1)
185 ret = start_crypto();
186 break;
187
188 case CXGB4_STATE_DETACH:
189 if (u_ctx->dev) {
190 mutex_lock(&dev_mutex);
191 chcr_dev_remove(u_ctx);
192 mutex_unlock(&dev_mutex);
193 }
194 if (!atomic_read(&dev_count))
195 stop_crypto();
196 break;
197
198 case CXGB4_STATE_START_RECOVERY:
199 case CXGB4_STATE_DOWN:
200 default:
201 break;
202 }
203 return ret;
204}
205
206static int __init chcr_crypto_init(void)
207{
208 if (cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &chcr_uld_info)) {
209		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
210		return -ENODEV;
211 }
212
213 return 0;
214}
215
216static void __exit chcr_crypto_exit(void)
217{
218 struct uld_ctx *u_ctx, *tmp;
219
220 if (atomic_read(&dev_count))
221 stop_crypto();
222
223 /* Remove all devices from list */
224 mutex_lock(&dev_mutex);
225 list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
226 if (u_ctx->dev)
227 chcr_dev_remove(u_ctx);
228 kfree(u_ctx);
229 }
230 mutex_unlock(&dev_mutex);
231 cxgb4_unregister_pci_uld(CXGB4_PCI_ULD1);
232}
233
234module_init(chcr_crypto_init);
235module_exit(chcr_crypto_exit);
236
237MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
238MODULE_LICENSE("GPL");
239MODULE_AUTHOR("Chelsio Communications");
240MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
new file mode 100644
index 000000000000..2a5c671a4232
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -0,0 +1,80 @@
1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 */
35
36#ifndef __CHCR_CORE_H__
37#define __CHCR_CORE_H__
38
39#include <crypto/algapi.h>
40#include "t4_hw.h"
41#include "cxgb4.h"
42#include "cxgb4_uld.h"
43
44#define DRV_MODULE_NAME "chcr"
45#define DRV_VERSION "1.0.0.0"
46
47#define MAX_PENDING_REQ_TO_HW 20
48#define CHCR_TEST_RESPONSE_TIMEOUT 1000
49
50#define PAD_ERROR_BIT 1
51#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
52
53#define MAC_ERROR_BIT 0
54#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
55
56struct uld_ctx;
57
58struct chcr_dev {
59	/* Request submitted to h/w and waiting for response. */
60 spinlock_t lock_chcr_dev;
61 struct crypto_queue pending_queue;
62 struct uld_ctx *u_ctx;
63 unsigned char tx_channel_id;
64};
65
66struct uld_ctx {
67 struct list_head entry;
68 struct cxgb4_lld_info lldi;
69 struct chcr_dev *dev;
70};
71
72int assign_chcr_device(struct chcr_dev **dev);
73int chcr_send_wr(struct sk_buff *skb);
74int start_crypto(void);
75int stop_crypto(void);
76int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
77 const struct pkt_gl *pgl);
78int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
79 int err);
80#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
new file mode 100644
index 000000000000..d7d75605da8b
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -0,0 +1,203 @@
1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 */
35
36#ifndef __CHCR_CRYPTO_H__
37#define __CHCR_CRYPTO_H__
38
39/* Define the following if the hardware does not drop the AAD and IV
40 * data before returning the processed data.
41 */
42
43#define CHCR_CRA_PRIORITY 300
44
45#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
46#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
47
48#define CHCR_MAX_AUTHENC_AES_KEY_LEN 32 /* max aes key length*/
49#define CHCR_MAX_AUTHENC_SHA_KEY_LEN 128 /* max sha key length*/
50
51#define CHCR_GIVENCRYPT_OP 2
52/* CPL/SCMD parameters */
53
54#define CHCR_ENCRYPT_OP 0
55#define CHCR_DECRYPT_OP 1
56
57#define CHCR_SCMD_SEQ_NO_CTRL_32BIT 1
58#define CHCR_SCMD_SEQ_NO_CTRL_48BIT 2
59#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
60
61#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
62
63#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
64#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
65
66#define CHCR_SCMD_CIPHER_MODE_NOP 0
67#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
68#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
69#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
70
71#define CHCR_SCMD_AUTH_MODE_NOP 0
72#define CHCR_SCMD_AUTH_MODE_SHA1 1
73#define CHCR_SCMD_AUTH_MODE_SHA224 2
74#define CHCR_SCMD_AUTH_MODE_SHA256 3
75#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
76#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
77#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
78#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
79
80#define CHCR_SCMD_HMAC_CTRL_NOP 0
81#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
82
83#define CHCR_SCMD_IVGEN_CTRL_HW 0
84#define CHCR_SCMD_IVGEN_CTRL_SW 1
85/* These are not really MAC key sizes; they encode the size of the
86 * SHA engine's intermediate state (illustrative mapping below).
87 */
88#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
89#define CHCR_KEYCTX_MAC_KEY_SIZE_160 1
90#define CHCR_KEYCTX_MAC_KEY_SIZE_192 2
91#define CHCR_KEYCTX_MAC_KEY_SIZE_256 3
92#define CHCR_KEYCTX_MAC_KEY_SIZE_512 4
93#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
94#define CHCR_KEYCTX_CIPHER_KEY_SIZE_192 1
95#define CHCR_KEYCTX_CIPHER_KEY_SIZE_256 2
96#define CHCR_KEYCTX_NO_KEY 15
97
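An illustrative mapping, assuming the usual SHA intermediate-state sizes (160/256/512 bits); this is a sketch of how these codes could be chosen from a digest size, not code taken from the patch:

/* Illustrative: map a digest size to the key-context MAC key size code. */
static inline unsigned int example_mac_key_size(unsigned int digestsize)
{
	switch (digestsize) {
	case SHA1_DIGEST_SIZE:		/* 160-bit intermediate state */
		return CHCR_KEYCTX_MAC_KEY_SIZE_160;
	case SHA224_DIGEST_SIZE:
	case SHA256_DIGEST_SIZE:	/* 256-bit intermediate state */
		return CHCR_KEYCTX_MAC_KEY_SIZE_256;
	case SHA384_DIGEST_SIZE:
	case SHA512_DIGEST_SIZE:	/* 512-bit intermediate state */
		return CHCR_KEYCTX_MAC_KEY_SIZE_512;
	default:
		return CHCR_KEYCTX_NO_KEY;
	}
}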
98#define CHCR_CPL_FW4_PLD_IV_OFFSET (5 * 64) /* bytes. flt #5 and #6 */
99#define CHCR_CPL_FW4_PLD_HASH_RESULT_OFFSET (7 * 64) /* bytes. flt #7 */
100#define CHCR_CPL_FW4_PLD_DATA_SIZE (4 * 64) /* bytes. flt #4 to #7 */
101
102#define KEY_CONTEXT_HDR_SALT_AND_PAD 16
103#define flits_to_bytes(x) ((x) * 8)
104
105#define IV_NOP 0
106#define IV_IMMEDIATE 1
107#define IV_DSGL 2
108
109#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
110#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
111#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
112 CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
113
114#define MAX_SALT 4
115#define MAX_SCRATCH_PAD_SIZE 32
116
117#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
118#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
119
120/* Aligned to 128 bit boundary */
121struct _key_ctx {
122 __be32 ctx_hdr;
123 u8 salt[MAX_SALT];
124	__be64 reserved;
125 unsigned char key[0];
126};
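A minimal sketch of how the ctx_hdr above could be built with FILL_KEY_CTX_HDR from chcr_algo.h, here for a single 128-bit cipher key with no opad; the ctx_len argument is assumed to be expressed in 16-byte units:

/* Illustrative only: fill the key-context header for a 128-bit cipher key. */
static inline void example_fill_key_ctx_hdr(struct _key_ctx *kctx,
					    unsigned int ctx_len_16b)
{
	/* ck_size, mk_size (none), dual key off, opad off, context length */
	kctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_128,
					 CHCR_KEYCTX_NO_KEY, 0, 0,
					 ctx_len_16b);
}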
127
128struct ablk_ctx {
129 u8 enc;
130 unsigned int processed_len;
131 __be32 key_ctx_hdr;
132 unsigned int enckey_len;
133 unsigned int dst_nents;
134 struct scatterlist iv_sg;
135 u8 key[CHCR_AES_MAX_KEY_LEN];
136 u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
137 unsigned char ciph_mode;
138};
139
140struct hmac_ctx {
141 struct shash_desc *desc;
142 u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
143 u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
144};
145
146struct __crypto_ctx {
147 struct hmac_ctx hmacctx[0];
148 struct ablk_ctx ablkctx[0];
149};
150
151struct chcr_context {
152 struct chcr_dev *dev;
153 unsigned char tx_channel_id;
154 struct __crypto_ctx crypto_ctx[0];
155};
156
157struct chcr_ahash_req_ctx {
158 u32 result;
159 char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
160 u8 bfr_len;
161	/* the partial hash is DMA'd into this buffer */
162 u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
163	u64 data_len;  /* total data length processed so far */
164 void *dummy_payload_ptr;
165 /* SKB which is being sent to the hardware for processing */
166 struct sk_buff *skb;
167};
168
169struct chcr_blkcipher_req_ctx {
170 struct sk_buff *skb;
171};
172
173struct chcr_alg_template {
174 u32 type;
175 u32 is_registered;
176 union {
177 struct crypto_alg crypto;
178 struct ahash_alg hash;
179 } alg;
180};
181
182struct chcr_req_ctx {
183 union {
184 struct ahash_request *ahash_req;
185 struct ablkcipher_request *ablk_req;
186 } req;
187 union {
188 struct chcr_ahash_req_ctx *ahash_ctx;
189 struct chcr_blkcipher_req_ctx *ablk_ctx;
190 } ctx;
191};
192
193struct sge_opaque_hdr {
194 void *dev;
195 dma_addr_t addr[MAX_SKB_FRAGS + 1];
196};
197
198typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
199 struct chcr_context *ctx,
200 unsigned short qid,
201 unsigned short op_type);
202
203#endif /* __CHCR_CRYPTO_H__ */