aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/crypto
diff options
context:
space:
mode:
authorKim Phillips <kim.phillips@freescale.com>2011-03-13 04:54:26 -0400
committerHerbert Xu <herbert@gondor.apana.org.au>2011-03-26 22:45:16 -0400
commit8e8ec596e6c0144e2dd500a57ee23dde9684df46 (patch)
tree6ca9b342f37b3dc7a62621e544c8861920668117 /drivers/crypto
parent60af520cf264ea26b2af3a6871bbd71850522aea (diff)
crypto: caam - Add support for the Freescale SEC4/CAAM
The SEC4 supersedes the SEC2.x/3.x as Freescale's Integrated Security Engine. Its programming model is incompatible with all prior versions of the SEC (talitos). The SEC4 is also known as the Cryptographic Accelerator and Assurance Module (CAAM); this driver is named caam. This initial submission does not include support for Data Path mode operation - AEAD descriptors are submitted via the job ring interface, while the Queue Interface (QI) is enabled for use by others. Only AEAD algorithms are implemented at this time, for use with IPsec. Many thanks to the Freescale STC team for their contributions to this driver. Signed-off-by: Steve Cornelius <sec@pobox.com> Signed-off-by: Kim Phillips <kim.phillips@freescale.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/caam/Kconfig72
-rw-r--r--drivers/crypto/caam/Makefile8
-rw-r--r--drivers/crypto/caam/caamalg.c1163
-rw-r--r--drivers/crypto/caam/compat.h35
-rw-r--r--drivers/crypto/caam/ctrl.c270
-rw-r--r--drivers/crypto/caam/desc.h1605
-rw-r--r--drivers/crypto/caam/desc_constr.h204
-rw-r--r--drivers/crypto/caam/error.c248
-rw-r--r--drivers/crypto/caam/error.h10
-rw-r--r--drivers/crypto/caam/intern.h113
-rw-r--r--drivers/crypto/caam/jr.c523
-rw-r--r--drivers/crypto/caam/jr.h21
-rw-r--r--drivers/crypto/caam/regs.h663
15 files changed, 4938 insertions, 0 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e54185223c8c..a27224aa883e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -200,6 +200,8 @@ config CRYPTO_DEV_HIFN_795X_RNG
200 Select this option if you want to enable the random number generator 200 Select this option if you want to enable the random number generator
201 on the HIFN 795x crypto adapters. 201 on the HIFN 795x crypto adapters.
202 202
203source drivers/crypto/caam/Kconfig
204
203config CRYPTO_DEV_TALITOS 205config CRYPTO_DEV_TALITOS
204 tristate "Talitos Freescale Security Engine (SEC)" 206 tristate "Talitos Freescale Security Engine (SEC)"
205 select CRYPTO_ALGAPI 207 select CRYPTO_ALGAPI
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 5203e34248d7..663c5efec13b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,6 +6,7 @@ n2_crypto-y := n2_core.o n2_asm.o
6obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 6obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
7obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o 7obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
8obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 8obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
9obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 10obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
10obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ 11obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
11obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o 12obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644
index 000000000000..2d876bb98ff4
--- /dev/null
+++ b/drivers/crypto/caam/Kconfig
@@ -0,0 +1,72 @@
1config CRYPTO_DEV_FSL_CAAM
2 tristate "Freescale CAAM-Multicore driver backend"
3 depends on FSL_SOC
4 help
5 Enables the driver module for Freescale's Cryptographic Accelerator
6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
7 This module adds a job ring operation interface, and configures h/w
8 to operate as a DPAA component automatically, depending
9 on h/w feature availability.
10
11 To compile this driver as a module, choose M here: the module
12 will be called caam.
13
14config CRYPTO_DEV_FSL_CAAM_RINGSIZE
15 int "Job Ring size"
16 depends on CRYPTO_DEV_FSL_CAAM
17 range 2 9
18 default "9"
19 help
20 Select size of Job Rings as a power of 2, within the
21 range 2-9 (ring size 4-512).
22 Examples:
23 2 => 4
24 3 => 8
25 4 => 16
26 5 => 32
27 6 => 64
28 7 => 128
29 8 => 256
30 9 => 512
31
32config CRYPTO_DEV_FSL_CAAM_INTC
33 bool "Job Ring interrupt coalescing"
34 depends on CRYPTO_DEV_FSL_CAAM
35 default y
36 help
37 Enable the Job Ring's interrupt coalescing feature.
38
39config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
40 int "Job Ring interrupt coalescing count threshold"
41 depends on CRYPTO_DEV_FSL_CAAM_INTC
42 range 1 255
43 default 255
44 help
45 Select number of descriptor completions to queue before
46 raising an interrupt, in the range 1-255. Note that a selection
47 of 1 functionally defeats the coalescing feature, and a selection
48 equal or greater than the job ring size will force timeouts.
49
50config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
51 int "Job Ring interrupt coalescing timer threshold"
52 depends on CRYPTO_DEV_FSL_CAAM_INTC
53 range 1 65535
54 default 2048
55 help
56 Select number of bus clocks/64 to timeout in the case that one or
57 more descriptor completions are queued without reaching the count
58 threshold. Range is 1-65535.
59
60config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
61 tristate "Register algorithm implementations with the Crypto API"
62 depends on CRYPTO_DEV_FSL_CAAM
63 default y
64 select CRYPTO_ALGAPI
65 select CRYPTO_AUTHENC
66 help
67 Selecting this will offload crypto for users of the
68 scatterlist crypto API (such as the linux native IPSec
69 stack) to the SEC4 via job ring.
70
71 To compile this as a module, choose M here: the module
72 will be called caamalg.
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644
index 000000000000..ef39011b4505
--- /dev/null
+++ b/drivers/crypto/caam/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the CAAM backend and dependent components
3#
4
5obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
6obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7
8caam-objs := ctrl.o jr.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644
index 000000000000..d7fe3d3d7db9
--- /dev/null
+++ b/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,1163 @@
1/*
2 * caam - Freescale FSL CAAM support for crypto API
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Based on talitos crypto API driver.
7 *
8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9 *
10 * --------------- ---------------
11 * | JobDesc #1 |-------------------->| ShareDesc |
12 * | *(packet 1) | | (PDB) |
13 * --------------- |------------->| (hashKey) |
14 * . | | (cipherKey) |
15 * . | |-------->| (operation) |
16 * --------------- | | ---------------
17 * | JobDesc #2 |------| |
18 * | *(packet 2) | |
19 * --------------- |
20 * . |
21 * . |
22 * --------------- |
23 * | JobDesc #3 |------------
24 * | *(packet 3) |
25 * ---------------
26 *
27 * The SharedDesc never changes for a connection unless rekeyed, but
28 * each packet will likely be in a different place. So all we need
29 * to know to process the packet is where the input is, where the
30 * output goes, and what context we want to process with. Context is
31 * in the SharedDesc, packet references in the JobDesc.
32 *
33 * So, a job desc looks like:
34 *
35 * ---------------------
36 * | Header |
37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR |
39 * | (output buffer) |
40 * | SEQ_IN_PTR |
41 * | (input buffer) |
42 * | LOAD (to DECO) |
43 * ---------------------
44 */
45
46#include "compat.h"
47
48#include "regs.h"
49#include "intern.h"
50#include "desc_constr.h"
51#include "jr.h"
52#include "error.h"
53
54/*
55 * crypto alg
56 */
57#define CAAM_CRA_PRIORITY 3000
58/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
59#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
60 SHA512_DIGEST_SIZE * 2)
61/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
62#define CAAM_MAX_IV_LENGTH 16
63
64#ifdef DEBUG
65/* for print_hex_dumps with line references */
66#define xstr(s) str(s)
67#define str(s) #s
68#define debug(format, arg...) printk(format, arg)
69#else
70#define debug(format, arg...)
71#endif
72
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;		/* job ring device servicing this session */
	u32 *sh_desc;			/* shared descriptor (keys + jump), built
					 * by build_sh_desc_ipsec() */
	dma_addr_t shared_desc_phys;	/* bus address of sh_desc */
	u32 class1_alg_type;		/* cipher (class 1) OP_ALG_* selection */
	u32 class2_alg_type;		/* auth (class 2) OP_ALG_* selection */
	u32 alg_op;			/* OP_ALG_* used for split-key generation */
	u8 *key;			/* padded MDHA split key followed by the
					 * cipher key (see aead_authenc_setkey) */
	dma_addr_t key_phys;		/* bus address of key */
	unsigned int keylen;		/* total key length passed to setkey */
	unsigned int enckeylen;		/* class 1 (cipher) key length */
	unsigned int authkeylen;	/* class 2 (auth) key length, pre-split */
	unsigned int split_key_len;	/* ipad/opad split key length */
	unsigned int split_key_pad_len;	/* split_key_len rounded up to 16 */
	unsigned int authsize;		/* ICV (auth tag) length */
};
92
93static int aead_authenc_setauthsize(struct crypto_aead *authenc,
94 unsigned int authsize)
95{
96 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
97
98 ctx->authsize = authsize;
99
100 return 0;
101}
102
/* completion context for synchronous split-key generation */
struct split_key_result {
	struct completion completion;	/* signalled by split_key_done() */
	int err;			/* job ring completion status */
};
107
108static void split_key_done(struct device *dev, u32 *desc, u32 err,
109 void *context)
110{
111 struct split_key_result *res = context;
112
113#ifdef DEBUG
114 dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
115#endif
116 if (err) {
117 char tmp[256];
118
119 dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
120 }
121
122 res->err = err;
123
124 complete(&res->completion);
125}
126
127/*
128get a split ipad/opad key
129
130Split key generation-----------------------------------------------
131
132[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
133[01] 0x04000014 key: class2->keyreg len=20
134 @0xffe01000
135[03] 0x84410014 operation: cls2-op sha1 hmac init dec
136[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
137[05] 0xa4000001 jump: class2 local all ->1 [06]
138[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
139 @0xffe04000
140*/
141static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
142{
143 struct device *jrdev = ctx->jrdev;
144 u32 *desc;
145 struct split_key_result result;
146 dma_addr_t dma_addr_in, dma_addr_out;
147 int ret = 0;
148
149 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
150
151 init_job_desc(desc, 0);
152
153 dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
154 DMA_TO_DEVICE);
155 if (dma_mapping_error(jrdev, dma_addr_in)) {
156 dev_err(jrdev, "unable to map key input memory\n");
157 kfree(desc);
158 return -ENOMEM;
159 }
160 append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
161 KEY_DEST_CLASS_REG);
162
163 /* Sets MDHA up into an HMAC-INIT */
164 append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
165 OP_ALG_AS_INIT);
166
167 /*
168 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
169 into both pads inside MDHA
170 */
171 append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
172 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
173
174 /*
175 * FIFO_STORE with the explicit split-key content store
176 * (0x26 output type)
177 */
178 dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
179 DMA_FROM_DEVICE);
180 if (dma_mapping_error(jrdev, dma_addr_out)) {
181 dev_err(jrdev, "unable to map key output memory\n");
182 kfree(desc);
183 return -ENOMEM;
184 }
185 append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
186 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
187
188#ifdef DEBUG
189 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
190 DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
191 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
192 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
193#endif
194
195 result.err = 0;
196 init_completion(&result.completion);
197
198 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
199 if (!ret) {
200 /* in progress */
201 wait_for_completion_interruptible(&result.completion);
202 ret = result.err;
203#ifdef DEBUG
204 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
205 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
206 ctx->split_key_pad_len, 1);
207#endif
208 }
209
210 dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
211 DMA_FROM_DEVICE);
212 dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
213
214 kfree(desc);
215
216 return ret;
217}
218
/*
 * Build the per-session shared descriptor: header, a conditional jump that
 * skips key loading when the keys are already resident (descriptor shared),
 * and immediate KEY commands for the split auth key and the cipher key.
 * On success ctx->sh_desc / ctx->shared_desc_phys are set.
 * Returns 0 or -ENOMEM.
 */
static int build_sh_desc_ipsec(struct caam_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;
	u32 *sh_desc;
	u32 *jump_cmd;

	/* build shared descriptor for this session */
	sh_desc = kmalloc(CAAM_CMD_SZ * 4 + ctx->split_key_pad_len +
			  ctx->enckeylen, GFP_DMA | GFP_KERNEL);
	if (!sh_desc) {
		dev_err(jrdev, "could not allocate shared descriptor\n");
		return -ENOMEM;
	}

	init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);

	/* skip key loading when the context is shared and keys are loaded */
	jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
			       JUMP_COND_SHRD | JUMP_COND_SELF);

	/* process keys, starting with class 2/authentication */
	append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len,
			  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);

	/* cipher key lives right after the padded split key in ctx->key */
	append_key_as_imm(sh_desc, (void *)ctx->key + ctx->split_key_pad_len,
			  ctx->enckeylen, ctx->enckeylen,
			  CLASS_1 | KEY_DEST_CLASS_REG);

	/* update jump cmd now that we are at the jump target */
	set_jump_tgt_here(sh_desc, jump_cmd);

	ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
					       desc_bytes(sh_desc),
					       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		kfree(sh_desc);
		return -ENOMEM;
	}

	ctx->sh_desc = sh_desc;

	return 0;
}
263
264static int aead_authenc_setkey(struct crypto_aead *aead,
265 const u8 *key, unsigned int keylen)
266{
267 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
268 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
269 struct caam_ctx *ctx = crypto_aead_ctx(aead);
270 struct device *jrdev = ctx->jrdev;
271 struct rtattr *rta = (void *)key;
272 struct crypto_authenc_key_param *param;
273 unsigned int authkeylen;
274 unsigned int enckeylen;
275 int ret = 0;
276
277 param = RTA_DATA(rta);
278 enckeylen = be32_to_cpu(param->enckeylen);
279
280 key += RTA_ALIGN(rta->rta_len);
281 keylen -= RTA_ALIGN(rta->rta_len);
282
283 if (keylen < enckeylen)
284 goto badkey;
285
286 authkeylen = keylen - enckeylen;
287
288 if (keylen > CAAM_MAX_KEY_SIZE)
289 goto badkey;
290
291 /* Pick class 2 key length from algorithm submask */
292 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
293 OP_ALG_ALGSEL_SHIFT] * 2;
294 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
295
296#ifdef DEBUG
297 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
298 keylen, enckeylen, authkeylen);
299 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
300 ctx->split_key_len, ctx->split_key_pad_len);
301 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
302 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
303#endif
304 ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
305 GFP_KERNEL | GFP_DMA);
306 if (!ctx->key) {
307 dev_err(jrdev, "could not allocate key output memory\n");
308 return -ENOMEM;
309 }
310
311 ret = gen_split_key(ctx, key, authkeylen);
312 if (ret) {
313 kfree(ctx->key);
314 goto badkey;
315 }
316
317 /* postpend encryption key to auth split key */
318 memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
319
320 ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
321 enckeylen, DMA_TO_DEVICE);
322 if (dma_mapping_error(jrdev, ctx->key_phys)) {
323 dev_err(jrdev, "unable to map key i/o memory\n");
324 kfree(ctx->key);
325 return -ENOMEM;
326 }
327#ifdef DEBUG
328 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
329 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
330 ctx->split_key_pad_len + enckeylen, 1);
331#endif
332
333 ctx->keylen = keylen;
334 ctx->enckeylen = enckeylen;
335 ctx->authkeylen = authkeylen;
336
337 ret = build_sh_desc_ipsec(ctx);
338 if (ret) {
339 dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
340 enckeylen, DMA_TO_DEVICE);
341 kfree(ctx->key);
342 }
343
344 return ret;
345badkey:
346 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
347 return -EINVAL;
348}
349
/* h/w link table (scatter/gather) entry, SEC4 format */
struct link_tbl_entry {
	u64 ptr;		/* bus address of the segment */
	u32 len;		/* segment length; bit 30 flags the final entry
				 * (see sg_to_link_tbl) */
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;		/* starting offset within the segment */
};
357
/*
 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @link_tbl: pointer to the h/w link table within this allocation
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *
 * The h/w descriptor is variable length and must not exceed
 * MAX_CAAM_DESCSIZE.
 */
struct ipsec_esp_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};
377
378static void ipsec_esp_unmap(struct device *dev,
379 struct ipsec_esp_edesc *edesc,
380 struct aead_request *areq)
381{
382 dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
383
384 if (unlikely(areq->dst != areq->src)) {
385 dma_unmap_sg(dev, areq->src, edesc->src_nents,
386 DMA_TO_DEVICE);
387 dma_unmap_sg(dev, areq->dst, edesc->dst_nents,
388 DMA_FROM_DEVICE);
389 } else {
390 dma_unmap_sg(dev, areq->src, edesc->src_nents,
391 DMA_BIDIRECTIONAL);
392 }
393
394 if (edesc->link_tbl_bytes)
395 dma_unmap_single(dev, edesc->link_tbl_dma,
396 edesc->link_tbl_bytes,
397 DMA_TO_DEVICE);
398}
399
400/*
401 * ipsec_esp descriptor callbacks
402 */
403static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
404 void *context)
405{
406 struct aead_request *areq = context;
407 struct ipsec_esp_edesc *edesc;
408#ifdef DEBUG
409 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
410 int ivsize = crypto_aead_ivsize(aead);
411 struct caam_ctx *ctx = crypto_aead_ctx(aead);
412
413 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
414#endif
415 edesc = (struct ipsec_esp_edesc *)((char *)desc -
416 offsetof(struct ipsec_esp_edesc, hw_desc));
417
418 if (err) {
419 char tmp[256];
420
421 dev_err(jrdev, "%s\n", caam_jr_strstatus(tmp, err));
422 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
423 }
424
425 ipsec_esp_unmap(jrdev, edesc, areq);
426
427#ifdef DEBUG
428 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
429 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
430 areq->assoclen , 1);
431 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
432 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
433 edesc->src_nents ? 100 : ivsize, 1);
434 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
435 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
436 edesc->src_nents ? 100 : areq->cryptlen +
437 ctx->authsize + 4, 1);
438#endif
439
440 kfree(edesc);
441
442 aead_request_complete(areq, err);
443}
444
/*
 * Completion callback for decrypt jobs: unmap DMA, translate an ICV check
 * failure into -EBADMSG, and complete the aead request.
 */
static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct aead_request *areq = context;
	struct ipsec_esp_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	/* hw_desc is embedded in the edesc; recover the container */
	edesc = (struct ipsec_esp_edesc *)((char *)desc -
		 offsetof(struct ipsec_esp_edesc, hw_desc));

	if (err) {
		char tmp[256];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ipsec_esp_unmap(jrdev, edesc, areq);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + areq->assoclen +
		       ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->link_tbl_bytes) {
		struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif
	kfree(edesc);

	aead_request_complete(areq, err);
}
491
492/*
493 * convert scatterlist to h/w link table format
494 * scatterlist must have been previously dma mapped
495 */
496static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
497 struct link_tbl_entry *link_tbl_ptr, u32 offset)
498{
499 while (sg_count) {
500 link_tbl_ptr->ptr = sg_dma_address(sg);
501 link_tbl_ptr->len = sg_dma_len(sg);
502 link_tbl_ptr->reserved = 0;
503 link_tbl_ptr->buf_pool_id = 0;
504 link_tbl_ptr->offset = offset;
505 link_tbl_ptr++;
506 sg = sg_next(sg);
507 sg_count--;
508 }
509
510 /* set Final bit (marks end of link table) */
511 link_tbl_ptr--;
512 link_tbl_ptr->len |= 0x40000000;
513}
514
/*
 * fill in and submit ipsec_esp job descriptor
 *
 * Maps the assoc/src/dst scatterlists, appends the class 2 (auth) and
 * class 1 (cipher) operations plus SEQ in/out pointers to the job
 * descriptor started by the caller, and enqueues it on the job ring.
 * @encrypt is OP_ALG_ENCRYPT or 0 (decrypt); @callback completes @areq.
 * Returns -EINPROGRESS on successful submission, negative errno otherwise.
 *
 * NOTE(review): the dma_map_sg() return values are not checked here —
 * a failed mapping would go unnoticed; verify against the job ring docs.
 */
static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
		     u32 encrypt,
		     void (*callback) (struct device *dev, u32 *desc,
				       u32 err, void *context))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = edesc->hw_desc, options;
	int ret, sg_count, assoc_sg_count;
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	dma_addr_t ptr, dst_dma, src_dma;
#ifdef DEBUG
	u32 *sh_desc = ctx->sh_desc;

	debug("assoclen %d cryptlen %d authsize %d\n",
	      areq->assoclen, areq->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
		       areq->assoclen , 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
		       edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif
	/* map assoc and payload; in-place operation maps src bidirectionally */
	assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
				    DMA_TO_DEVICE);
	if (areq->src == areq->dst)
		sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
				      DMA_BIDIRECTIONAL);
	else
		sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
				      DMA_TO_DEVICE);

	/* start auth operation; on decrypt, enable the h/w ICV check */
	append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
			 (encrypt ? : OP_ALG_ICV_ON));

	/* Load FIFO with data for Class 2 CHA */
	options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
	if (!edesc->assoc_nents) {
		/* single segment: point directly at the data */
		ptr = sg_dma_address(areq->assoc);
	} else {
		/* multi-segment: go through the h/w link table */
		sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
			       edesc->link_tbl, 0);
		ptr = edesc->link_tbl_dma;
		options |= LDST_SGF;
	}
	append_fifo_load(desc, ptr, areq->assoclen, options);

	/* copy iv from cipher/class1 input context to class2 infifo */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);

	/* start class 1 (cipher) operation */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 encrypt);

	/* load payload & instruct to class2 to snoop class 1 if encrypting */
	options = 0;
	if (!edesc->src_nents) {
		src_dma = sg_dma_address(areq->src);
	} else {
		/* src link table entries follow the assoc ones */
		sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
			       edesc->assoc_nents, 0);
		src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
			  sizeof(struct link_tbl_entry);
		options |= LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
	append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
			     FIFOLD_TYPE_LASTBOTH |
			     (encrypt ? FIFOLD_TYPE_MSG1OUT2
				      : FIFOLD_TYPE_MSG));

	/* specify destination */
	if (areq->src == areq->dst) {
		/* in-place: reuse the source pointer/options */
		dst_dma = src_dma;
	} else {
		sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(areq->dst);
			options = 0;
		} else {
			/* dst link table entries follow assoc + src */
			sg_to_link_tbl(areq->dst, edesc->dst_nents,
				       edesc->link_tbl + edesc->assoc_nents +
				       edesc->src_nents, 0);
			dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
				  edesc->src_nents) *
				  sizeof(struct link_tbl_entry);
			options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
	append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);

	/* ICV: store it on encrypt, load and compare it on decrypt */
	if (encrypt)
		append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
				 LDST_SRCDST_BYTE_CONTEXT);
	else
		append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
				     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

#ifdef DEBUG
	debug("job_desc_len %d\n", desc_len(desc));
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1);
	print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
		       edesc->link_tbl_bytes, 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, callback, areq);
	if (!ret)
		ret = -EINPROGRESS;
	else {
		/* submission failed: the callback will not run; clean up here */
		ipsec_esp_unmap(jrdev, edesc, areq);
		kfree(edesc);
	}

	return ret;
}
647
648/*
649 * derive number of elements in scatterlist
650 */
651static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
652{
653 struct scatterlist *sg = sg_list;
654 int sg_nents = 0;
655
656 *chained = 0;
657 while (nbytes > 0) {
658 sg_nents++;
659 nbytes -= sg->length;
660 if (!sg_is_last(sg) && (sg + 1)->length == 0)
661 *chained = 1;
662 sg = scatterwalk_sg_next(sg);
663 }
664
665 return sg_nents;
666}
667
668/*
669 * allocate and map the ipsec_esp extended descriptor
670 */
671static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
672 int desc_bytes)
673{
674 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
675 struct caam_ctx *ctx = crypto_aead_ctx(aead);
676 struct device *jrdev = ctx->jrdev;
677 gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
678 GFP_ATOMIC;
679 int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
680 struct ipsec_esp_edesc *edesc;
681
682 assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
683 BUG_ON(chained);
684 if (likely(assoc_nents == 1))
685 assoc_nents = 0;
686
687 src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
688 &chained);
689 BUG_ON(chained);
690 if (src_nents == 1)
691 src_nents = 0;
692
693 if (unlikely(areq->dst != areq->src)) {
694 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
695 &chained);
696 BUG_ON(chained);
697 if (dst_nents == 1)
698 dst_nents = 0;
699 }
700
701 link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
702 sizeof(struct link_tbl_entry);
703 debug("link_tbl_bytes %d\n", link_tbl_bytes);
704
705 /* allocate space for base edesc and hw desc commands, link tables */
706 edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
707 link_tbl_bytes, GFP_DMA | flags);
708 if (!edesc) {
709 dev_err(jrdev, "could not allocate extended descriptor\n");
710 return ERR_PTR(-ENOMEM);
711 }
712
713 edesc->assoc_nents = assoc_nents;
714 edesc->src_nents = src_nents;
715 edesc->dst_nents = dst_nents;
716 edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
717 desc_bytes;
718 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
719 link_tbl_bytes, DMA_TO_DEVICE);
720 edesc->link_tbl_bytes = link_tbl_bytes;
721
722 return edesc;
723}
724
725static int aead_authenc_encrypt(struct aead_request *areq)
726{
727 struct ipsec_esp_edesc *edesc;
728 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
729 struct caam_ctx *ctx = crypto_aead_ctx(aead);
730 struct device *jrdev = ctx->jrdev;
731 int ivsize = crypto_aead_ivsize(aead);
732 u32 *desc;
733 dma_addr_t iv_dma;
734
735 /* allocate extended descriptor */
736 edesc = ipsec_esp_edesc_alloc(areq, 21 * sizeof(u32));
737 if (IS_ERR(edesc))
738 return PTR_ERR(edesc);
739
740 desc = edesc->hw_desc;
741
742 /* insert shared descriptor pointer */
743 init_job_desc_shared(desc, ctx->shared_desc_phys,
744 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
745
746 iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
747 /* check dma error */
748
749 append_load(desc, iv_dma, ivsize,
750 LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
751
752 return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
753}
754
755static int aead_authenc_decrypt(struct aead_request *req)
756{
757 struct crypto_aead *aead = crypto_aead_reqtfm(req);
758 int ivsize = crypto_aead_ivsize(aead);
759 struct caam_ctx *ctx = crypto_aead_ctx(aead);
760 struct device *jrdev = ctx->jrdev;
761 struct ipsec_esp_edesc *edesc;
762 u32 *desc;
763 dma_addr_t iv_dma;
764
765 req->cryptlen -= ctx->authsize;
766
767 /* allocate extended descriptor */
768 edesc = ipsec_esp_edesc_alloc(req, 21 * sizeof(u32));
769 if (IS_ERR(edesc))
770 return PTR_ERR(edesc);
771
772 desc = edesc->hw_desc;
773
774 /* insert shared descriptor pointer */
775 init_job_desc_shared(desc, ctx->shared_desc_phys,
776 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
777
778 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
779 /* check dma error */
780
781 append_load(desc, iv_dma, ivsize,
782 LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
783
784 return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
785}
786
787static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
788{
789 struct aead_request *areq = &req->areq;
790 struct ipsec_esp_edesc *edesc;
791 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
792 struct caam_ctx *ctx = crypto_aead_ctx(aead);
793 struct device *jrdev = ctx->jrdev;
794 int ivsize = crypto_aead_ivsize(aead);
795 dma_addr_t iv_dma;
796 u32 *desc;
797
798 iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);
799
800 debug("%s: giv %p\n", __func__, req->giv);
801
802 /* allocate extended descriptor */
803 edesc = ipsec_esp_edesc_alloc(areq, 27 * sizeof(u32));
804 if (IS_ERR(edesc))
805 return PTR_ERR(edesc);
806
807 desc = edesc->hw_desc;
808
809 /* insert shared descriptor pointer */
810 init_job_desc_shared(desc, ctx->shared_desc_phys,
811 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
812
813 /*
814 * LOAD IMM Info FIFO
815 * to DECO, Last, Padding, Random, Message, 16 bytes
816 */
817 append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
818 NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
819 NFIFOENTRY_PTYPE_RND | ivsize,
820 LDST_SRCDST_WORD_INFO_FIFO);
821
822 /*
823 * disable info fifo entries since the above serves as the entry
824 * this way, the MOVE command won't generate an entry.
825 * Note that this isn't required in more recent versions of
826 * SEC as a MOVE that doesn't do info FIFO entries is available.
827 */
828 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
829
830 /* MOVE DECO Alignment -> C1 Context 16 bytes */
831 append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
832 MOVE_DEST_CLASS1CTX | ivsize);
833
834 /* re-enable info fifo entries */
835 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
836
837 /* MOVE C1 Context -> OFIFO 16 bytes */
838 append_move(desc, MOVE_WAITCOMP | MOVE_SRC_CLASS1CTX |
839 MOVE_DEST_OUTFIFO | ivsize);
840
841 append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
842
843 return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
844}
845
/*
 * caam_alg_template - per-algorithm registration template
 * @name:		crypto API algorithm name
 * @driver_name:	driver-specific (implementation) name
 * @blocksize:		cipher block size for crypto_alg.cra_blocksize
 * @aead:		aead_alg callbacks/parameters copied into the
 *			registered crypto_alg
 * @class1_alg_type:	CHA class 1 (cipher) OPERATION command selector/AAI
 * @class2_alg_type:	CHA class 2 (authentication) OPERATION selector/AAI
 * @alg_op:		OPERATION selector used for split-key generation
 */
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct aead_alg aead;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
855
/*
 * Table of AEAD algorithms this driver registers with the crypto API.
 * All entries use the same single-pass ipsec_esp descriptor flow; they
 * differ only in cipher (AES/3DES/DES CBC) and HMAC digest (SHA-1/SHA-256)
 * selectors programmed into the OPERATION commands.
 */
static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
};
970
/*
 * caam_crypto_alg - a registered algorithm instance
 * @entry:		list linkage on the driver-private alg_list
 * @ctrldev:		controller device this alg was registered against
 * @class1_alg_type:	cipher OPERATION selector copied from the template
 * @class2_alg_type:	auth OPERATION selector copied from the template
 * @alg_op:		split-key OPERATION selector copied from the template
 * @crypto_alg:		the crypto_alg handed to crypto_register_alg()
 */
struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};
979
980static int caam_cra_init(struct crypto_tfm *tfm)
981{
982 struct crypto_alg *alg = tfm->__crt_alg;
983 struct caam_crypto_alg *caam_alg =
984 container_of(alg, struct caam_crypto_alg, crypto_alg);
985 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
986 struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
987 int tgt_jr = atomic_inc_return(&priv->tfm_count);
988
989 /*
990 * distribute tfms across job rings to ensure in-order
991 * crypto request processing per tfm
992 */
993 ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
994
995 /* copy descriptor header template value */
996 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
997 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
998 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
999
1000 return 0;
1001}
1002
1003static void caam_cra_exit(struct crypto_tfm *tfm)
1004{
1005 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
1006
1007 if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
1008 dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
1009 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
1010 kfree(ctx->sh_desc);
1011}
1012
1013static void __exit caam_algapi_exit(void)
1014{
1015
1016 struct device_node *dev_node;
1017 struct platform_device *pdev;
1018 struct device *ctrldev;
1019 struct caam_drv_private *priv;
1020 struct caam_crypto_alg *t_alg, *n;
1021 int i, err;
1022
1023 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1024 if (!dev_node)
1025 return;
1026
1027 pdev = of_find_device_by_node(dev_node);
1028 if (!pdev)
1029 return;
1030
1031 ctrldev = &pdev->dev;
1032 of_node_put(dev_node);
1033 priv = dev_get_drvdata(ctrldev);
1034
1035 if (!priv->alg_list.next)
1036 return;
1037
1038 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1039 crypto_unregister_alg(&t_alg->crypto_alg);
1040 list_del(&t_alg->entry);
1041 kfree(t_alg);
1042 }
1043
1044 for (i = 0; i < priv->total_jobrs; i++) {
1045 err = caam_jr_deregister(priv->algapi_jr[i]);
1046 if (err < 0)
1047 break;
1048 }
1049 kfree(priv->algapi_jr);
1050}
1051
1052static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
1053 struct caam_alg_template
1054 *template)
1055{
1056 struct caam_crypto_alg *t_alg;
1057 struct crypto_alg *alg;
1058
1059 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
1060 if (!t_alg) {
1061 dev_err(ctrldev, "failed to allocate t_alg\n");
1062 return ERR_PTR(-ENOMEM);
1063 }
1064
1065 alg = &t_alg->crypto_alg;
1066
1067 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1068 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1069 template->driver_name);
1070 alg->cra_module = THIS_MODULE;
1071 alg->cra_init = caam_cra_init;
1072 alg->cra_exit = caam_cra_exit;
1073 alg->cra_priority = CAAM_CRA_PRIORITY;
1074 alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
1075 alg->cra_blocksize = template->blocksize;
1076 alg->cra_alignmask = 0;
1077 alg->cra_type = &crypto_aead_type;
1078 alg->cra_ctxsize = sizeof(struct caam_ctx);
1079 alg->cra_u.aead = template->aead;
1080
1081 t_alg->class1_alg_type = template->class1_alg_type;
1082 t_alg->class2_alg_type = template->class2_alg_type;
1083 t_alg->alg_op = template->alg_op;
1084 t_alg->ctrldev = ctrldev;
1085
1086 return t_alg;
1087}
1088
1089static int __init caam_algapi_init(void)
1090{
1091 struct device_node *dev_node;
1092 struct platform_device *pdev;
1093 struct device *ctrldev, **jrdev;
1094 struct caam_drv_private *priv;
1095 int i = 0, err = 0;
1096
1097 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1098 if (!dev_node)
1099 return -ENODEV;
1100
1101 pdev = of_find_device_by_node(dev_node);
1102 if (!pdev)
1103 return -ENODEV;
1104
1105 ctrldev = &pdev->dev;
1106 priv = dev_get_drvdata(ctrldev);
1107 of_node_put(dev_node);
1108
1109 INIT_LIST_HEAD(&priv->alg_list);
1110
1111 jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
1112 if (!jrdev)
1113 return -ENOMEM;
1114
1115 for (i = 0; i < priv->total_jobrs; i++) {
1116 err = caam_jr_register(ctrldev, &jrdev[i]);
1117 if (err < 0)
1118 break;
1119 }
1120 if (err < 0 && i == 0) {
1121 dev_err(ctrldev, "algapi error in job ring registration: %d\n",
1122 err);
1123 return err;
1124 }
1125
1126 priv->num_jrs_for_algapi = i;
1127 priv->algapi_jr = jrdev;
1128 atomic_set(&priv->tfm_count, -1);
1129
1130 /* register crypto algorithms the device supports */
1131 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1132 /* TODO: check if h/w supports alg */
1133 struct caam_crypto_alg *t_alg;
1134
1135 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
1136 if (IS_ERR(t_alg)) {
1137 err = PTR_ERR(t_alg);
1138 dev_warn(ctrldev, "%s alg allocation failed\n",
1139 t_alg->crypto_alg.cra_driver_name);
1140 continue;
1141 }
1142
1143 err = crypto_register_alg(&t_alg->crypto_alg);
1144 if (err) {
1145 dev_warn(ctrldev, "%s alg registration failed\n",
1146 t_alg->crypto_alg.cra_driver_name);
1147 kfree(t_alg);
1148 } else {
1149 list_add_tail(&t_alg->entry, &priv->alg_list);
1150 dev_info(ctrldev, "%s\n",
1151 t_alg->crypto_alg.cra_driver_name);
1152 }
1153 }
1154
1155 return err;
1156}
1157
1158module_init(caam_algapi_init);
1159module_exit(caam_algapi_exit);
1160
1161MODULE_LICENSE("GPL");
1162MODULE_DESCRIPTION("FSL CAAM support for crypto API");
1163MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
new file mode 100644
index 000000000000..950450346f70
--- /dev/null
+++ b/drivers/crypto/caam/compat.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2008-2011 Freescale Semiconductor, Inc.
3 */
4
5#ifndef CAAM_COMPAT_H
6#define CAAM_COMPAT_H
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/mod_devicetable.h>
11#include <linux/device.h>
12#include <linux/interrupt.h>
13#include <linux/crypto.h>
14#include <linux/hw_random.h>
15#include <linux/of_platform.h>
16#include <linux/dma-mapping.h>
17#include <linux/io.h>
18#include <linux/spinlock.h>
19#include <linux/rtnetlink.h>
20#include <linux/in.h>
21#include <linux/slab.h>
22#include <linux/types.h>
23#include <linux/debugfs.h>
24#include <linux/circ_buf.h>
25#include <net/xfrm.h>
26
27#include <crypto/algapi.h>
28#include <crypto/aes.h>
29#include <crypto/des.h>
30#include <crypto/sha.h>
31#include <crypto/aead.h>
32#include <crypto/authenc.h>
33#include <crypto/scatterwalk.h>
34
35#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
new file mode 100644
index 000000000000..aa2216160103
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.c
@@ -0,0 +1,270 @@
1/*
2 * CAAM control-plane driver backend
3 * Controller-level driver, kernel property detection, initialization
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#include "compat.h"
9#include "regs.h"
10#include "intern.h"
11#include "jr.h"
12
/*
 * Controller-level remove: shut down every job ring, tear down debugfs,
 * unmap the register region and free the driver-private state.
 * Returns the OR of the individual job-ring shutdown results.
 */
static int caam_remove(struct platform_device *pdev)
{
	struct device *ctrldev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	struct caam_full __iomem *topregs;
	int ring, ret = 0;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;

	/* shut down JobRs */
	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
		/* accumulate failures but keep shutting the rest down */
		ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
		irq_dispose_mapping(jrpriv->irq);
	}

	/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

	/* Unmap controller region */
	iounmap(&topregs->ctrl);

	kfree(ctrlpriv->jrdev);
	kfree(ctrlpriv);

	return ret;
}
45
46/* Probe routine for CAAM top (controller) level */
47static int caam_probe(struct platform_device *pdev,
48 const struct of_device_id *devmatch)
49{
50 int d, ring, rspec;
51 struct device *dev;
52 struct device_node *nprop, *np;
53 struct caam_ctrl __iomem *ctrl;
54 struct caam_full __iomem *topregs;
55 struct caam_drv_private *ctrlpriv;
56 struct caam_perfmon *perfmon;
57 struct caam_deco **deco;
58 u32 deconum;
59
60 ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
61 if (!ctrlpriv)
62 return -ENOMEM;
63
64 dev = &pdev->dev;
65 dev_set_drvdata(dev, ctrlpriv);
66 ctrlpriv->pdev = pdev;
67 nprop = pdev->dev.of_node;
68
69 /* Get configuration properties from device tree */
70 /* First, get register page */
71 ctrl = of_iomap(nprop, 0);
72 if (ctrl == NULL) {
73 dev_err(dev, "caam: of_iomap() failed\n");
74 return -ENOMEM;
75 }
76 ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
77
78 /* topregs used to derive pointers to CAAM sub-blocks only */
79 topregs = (struct caam_full __iomem *)ctrl;
80
81 /* Get the IRQ of the controller (for security violations only) */
82 ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
83
84 /*
85 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
86 * 36-bit pointers in master configuration register
87 */
88 setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
89 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
90
91 if (sizeof(dma_addr_t) == sizeof(u64))
92 dma_set_mask(dev, DMA_BIT_MASK(36));
93
94 /* Find out how many DECOs are present */
95 deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
96 CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
97
98 ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
99 GFP_KERNEL);
100
101 deco = (struct caam_deco __force **)&topregs->deco;
102 for (d = 0; d < deconum; d++)
103 ctrlpriv->deco[d] = deco[d];
104
105 /*
106 * Detect and enable JobRs
107 * First, find out how many ring spec'ed, allocate references
108 * for all, then go probe each one.
109 */
110 rspec = 0;
111 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
112 rspec++;
113 ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
114 if (ctrlpriv->jrdev == NULL) {
115 iounmap(&topregs->ctrl);
116 return -ENOMEM;
117 }
118
119 ring = 0;
120 ctrlpriv->total_jobrs = 0;
121 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
122 caam_jr_probe(pdev, np, ring);
123 ctrlpriv->total_jobrs++;
124 ring++;
125 }
126
127 /* Check to see if QI present. If so, enable */
128 ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
129 CTPR_QI_MASK);
130 if (ctrlpriv->qi_present) {
131 ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
132 /* This is all that's required to physically enable QI */
133 wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
134 }
135
136 /* If no QI and no rings specified, quit and go home */
137 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
138 dev_err(dev, "no queues configured, terminating\n");
139 caam_remove(pdev);
140 return -ENOMEM;
141 }
142
143 /* NOTE: RTIC detection ought to go here, around Si time */
144
145 /* Initialize queue allocator lock */
146 spin_lock_init(&ctrlpriv->jr_alloc_lock);
147
148 /* Report "alive" for developer to see */
149 dev_info(dev, "device ID = 0x%016llx\n",
150 rd_reg64(&topregs->ctrl.perfmon.caam_id));
151 dev_info(dev, "job rings = %d, qi = %d\n",
152 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
153
154#ifdef CONFIG_DEBUG_FS
155 /*
156 * FIXME: needs better naming distinction, as some amalgamation of
157 * "caam" and nprop->full_name. The OF name isn't distinctive,
158 * but does separate instances
159 */
160 perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
161
162 ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
163 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
164
165 /* Controller-level - performance monitor counters */
166 ctrlpriv->ctl_rq_dequeued =
167 debugfs_create_u64("rq_dequeued",
168 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
169 ctrlpriv->ctl, &perfmon->req_dequeued);
170 ctrlpriv->ctl_ob_enc_req =
171 debugfs_create_u64("ob_rq_encrypted",
172 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
173 ctrlpriv->ctl, &perfmon->ob_enc_req);
174 ctrlpriv->ctl_ib_dec_req =
175 debugfs_create_u64("ib_rq_decrypted",
176 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
177 ctrlpriv->ctl, &perfmon->ib_dec_req);
178 ctrlpriv->ctl_ob_enc_bytes =
179 debugfs_create_u64("ob_bytes_encrypted",
180 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
181 ctrlpriv->ctl, &perfmon->ob_enc_bytes);
182 ctrlpriv->ctl_ob_prot_bytes =
183 debugfs_create_u64("ob_bytes_protected",
184 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
185 ctrlpriv->ctl, &perfmon->ob_prot_bytes);
186 ctrlpriv->ctl_ib_dec_bytes =
187 debugfs_create_u64("ib_bytes_decrypted",
188 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
189 ctrlpriv->ctl, &perfmon->ib_dec_bytes);
190 ctrlpriv->ctl_ib_valid_bytes =
191 debugfs_create_u64("ib_bytes_validated",
192 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
193 ctrlpriv->ctl, &perfmon->ib_valid_bytes);
194
195 /* Controller level - global status values */
196 ctrlpriv->ctl_faultaddr =
197 debugfs_create_u64("fault_addr",
198 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
199 ctrlpriv->ctl, &perfmon->faultaddr);
200 ctrlpriv->ctl_faultdetail =
201 debugfs_create_u32("fault_detail",
202 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
203 ctrlpriv->ctl, &perfmon->faultdetail);
204 ctrlpriv->ctl_faultstatus =
205 debugfs_create_u32("fault_status",
206 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
207 ctrlpriv->ctl, &perfmon->status);
208
209 /* Internal covering keys (useful in non-secure mode only) */
210 ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
211 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
212 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
213 S_IFCHR | S_IRUSR |
214 S_IRGRP | S_IROTH,
215 ctrlpriv->ctl,
216 &ctrlpriv->ctl_kek_wrap);
217
218 ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
219 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
220 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
221 S_IFCHR | S_IRUSR |
222 S_IRGRP | S_IROTH,
223 ctrlpriv->ctl,
224 &ctrlpriv->ctl_tkek_wrap);
225
226 ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
227 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
228 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
229 S_IFCHR | S_IRUSR |
230 S_IRGRP | S_IROTH,
231 ctrlpriv->ctl,
232 &ctrlpriv->ctl_tdsk_wrap);
233#endif
234 return 0;
235}
236
/* Device-tree match table: bind to SEC4.0-compatible controller nodes */
static struct of_device_id caam_match[] = {
	{
		.compatible = "fsl,sec4.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_match);
244
/* OF platform driver for the CAAM controller (top-level) node */
static struct of_platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.owner = THIS_MODULE,
		.of_match_table = caam_match,
	},
	.probe       = caam_probe,
	.remove      = __devexit_p(caam_remove),
};
254
/* Module entry point: register the OF platform driver */
static int __init caam_base_init(void)
{
	return of_register_platform_driver(&caam_driver);
}
259
260static void __exit caam_base_exit(void)
261{
262 return of_unregister_platform_driver(&caam_driver);
263}
264
265module_init(caam_base_init);
266module_exit(caam_base_exit);
267
268MODULE_LICENSE("GPL");
269MODULE_DESCRIPTION("FSL CAAM request backend");
270MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
new file mode 100644
index 000000000000..974a75842da9
--- /dev/null
+++ b/drivers/crypto/caam/desc.h
@@ -0,0 +1,1605 @@
1/*
2 * CAAM descriptor composition header
3 * Definitions to support CAAM descriptor instruction generation
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#ifndef DESC_H
9#define DESC_H
10
11/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
12#define MAX_CAAM_DESCSIZE 64
13
14/* Block size of any entity covered/uncovered with a KEK/TKEK */
15#define KEK_BLOCKSIZE 16
16
17/*
18 * Supported descriptor command types as they show up
19 * inside a descriptor command word.
20 */
21#define CMD_SHIFT 27
22#define CMD_MASK 0xf8000000
23
24#define CMD_KEY (0x00 << CMD_SHIFT)
25#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
26#define CMD_LOAD (0x02 << CMD_SHIFT)
27#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
28#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
29#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
30#define CMD_STORE (0x0a << CMD_SHIFT)
31#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
32#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
33#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
34#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
35#define CMD_MOVE (0x0f << CMD_SHIFT)
36#define CMD_OPERATION (0x10 << CMD_SHIFT)
37#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
38#define CMD_JUMP (0x14 << CMD_SHIFT)
39#define CMD_MATH (0x15 << CMD_SHIFT)
40#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
41#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
42#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
43#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
44
45/* General-purpose class selector for all commands */
46#define CLASS_SHIFT 25
47#define CLASS_MASK (0x03 << CLASS_SHIFT)
48
49#define CLASS_NONE (0x00 << CLASS_SHIFT)
50#define CLASS_1 (0x01 << CLASS_SHIFT)
51#define CLASS_2 (0x02 << CLASS_SHIFT)
52#define CLASS_BOTH (0x03 << CLASS_SHIFT)
53
54/*
55 * Descriptor header command constructs
56 * Covers shared, job, and trusted descriptor headers
57 */
58
59/*
60 * Do Not Run - marks a descriptor inexecutable if there was
61 * a preceding error somewhere
62 */
63#define HDR_DNR 0x01000000
64
65/*
66 * ONE - should always be set. Combination of ONE (always
67 * set) and ZRO (always clear) forms an endianness sanity check
68 */
69#define HDR_ONE 0x00800000
70#define HDR_ZRO 0x00008000
71
72/* Start Index or SharedDesc Length */
73#define HDR_START_IDX_MASK 0x3f
74#define HDR_START_IDX_SHIFT 16
75
76/* If shared descriptor header, 6-bit length */
77#define HDR_DESCLEN_SHR_MASK 0x3f
78
79/* If non-shared header, 7-bit length */
80#define HDR_DESCLEN_MASK 0x7f
81
82/* This is a TrustedDesc (if not SharedDesc) */
83#define HDR_TRUSTED 0x00004000
84
85/* Make into TrustedDesc (if not SharedDesc) */
86#define HDR_MAKE_TRUSTED 0x00002000
87
88/* Save context if self-shared (if SharedDesc) */
89#define HDR_SAVECTX 0x00001000
90
91/* Next item points to SharedDesc */
92#define HDR_SHARED 0x00001000
93
94/*
95 * Reverse Execution Order - execute JobDesc first, then
96 * execute SharedDesc (normally SharedDesc goes first).
97 */
98#define HDR_REVERSE 0x00000800
99
100/* Propogate DNR property to SharedDesc */
101#define HDR_PROP_DNR 0x00000800
102
103/* JobDesc/SharedDesc share property */
104#define HDR_SD_SHARE_MASK 0x03
105#define HDR_SD_SHARE_SHIFT 8
106#define HDR_JD_SHARE_MASK 0x07
107#define HDR_JD_SHARE_SHIFT 8
108
109#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
110#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
111#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
112#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
113#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
114
115/* JobDesc/SharedDesc descriptor length */
116#define HDR_JD_LENGTH_MASK 0x7f
117#define HDR_SD_LENGTH_MASK 0x3f
118
119/*
120 * KEY/SEQ_KEY Command Constructs
121 */
122
123/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
124#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
125#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
126
127/* Scatter-Gather Table/Variable Length Field */
128#define KEY_SGF 0x01000000
129#define KEY_VLF 0x01000000
130
131/* Immediate - Key follows command in the descriptor */
132#define KEY_IMM 0x00800000
133
134/*
135 * Encrypted - Key is encrypted either with the KEK, or
136 * with the TDKEK if TK is set
137 */
138#define KEY_ENC 0x00400000
139
140/*
141 * No Write Back - Do not allow key to be FIFO STOREd
142 */
143#define KEY_NWB 0x00200000
144
145/*
146 * Enhanced Encryption of Key
147 */
148#define KEY_EKT 0x00100000
149
150/*
151 * Encrypted with Trusted Key
152 */
153#define KEY_TK 0x00008000
154
155/*
156 * KDEST - Key Destination: 0 - class key register,
157 * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
158 */
159#define KEY_DEST_SHIFT 16
160#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
161
162#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
163#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
164#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
165#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
166
167/* Length in bytes */
168#define KEY_LENGTH_MASK 0x000003ff
169
170/*
171 * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
172 */
173
174/*
175 * Load/Store Destination: 0 = class independent CCB,
176 * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
177 */
178#define LDST_CLASS_SHIFT 25
179#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
180#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
181#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
182#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
183#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
184
185/* Scatter-Gather Table/Variable Length Field */
186#define LDST_SGF 0x01000000
187#define LDST_VLF LDST_SGF
188
189/* Immediate - Key follows this command in descriptor */
190#define LDST_IMM_MASK 1
191#define LDST_IMM_SHIFT 23
192#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
193
194/* SRC/DST - Destination for LOAD, Source for STORE */
195#define LDST_SRCDST_SHIFT 16
196#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
197
198#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
199#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
200#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
201#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
202
203#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
204#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
205#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
206#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
207#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
208#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
209#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
210#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
211#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
212#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
213#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
214#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
215#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
216#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
217#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
218#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
219#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
220#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
221#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
222#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
223#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
224#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
225#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
226
227/* Offset in source/destination */
228#define LDST_OFFSET_SHIFT 8
229#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
230
231/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
232/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
233#define LDOFF_CHG_SHARE_SHIFT 0
234#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
235#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
236#define LDOFF_CHG_SHARE_OK_NO_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
237#define LDOFF_CHG_SHARE_OK_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
238
239#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
240#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
241
242#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
243#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
244#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
245#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
246#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
247
248#define LDOFF_CHG_SEQLIODN_SHIFT 6
249#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
250#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
251#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
252#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
253
254/* Data length in bytes */
255#define LDST_LEN_SHIFT 0
256#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
257
258/* Special Length definitions when dst=deco-ctrl */
259#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
260#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
261#define LDLEN_RST_OFIFO (1 << 5)
262#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
263#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
264#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
265#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
266
267/*
268 * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
269 * Command Constructs
270 */
271
272/*
273 * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
274 * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
275 * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
276 */
277#define FIFOLD_CLASS_SHIFT 25
278#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
279#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
280#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
281#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
282#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
283
284#define FIFOST_CLASS_SHIFT 25
285#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
286#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
287#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
288#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
289
290/*
291 * Scatter-Gather Table/Variable Length Field
292 * If set for FIFO_LOAD, refers to a SG table. Within
293 * SEQ_FIFO_LOAD, is variable input sequence
294 */
295#define FIFOLDST_SGF_SHIFT 24
296#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
297#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
298#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
299#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
300
301/* Immediate - Data follows command in descriptor */
302#define FIFOLD_IMM_SHIFT 23
303#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
304#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
305
/* Continue - Not the last FIFO store to come */
#define FIFOST_CONT_SHIFT	23
#define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
/* fix: second definition duplicated _MASK; the flag itself was missing */
#define FIFOST_CONT		(1 << FIFOST_CONT_SHIFT)
310
/*
 * Extended Length - use 32-bit extended length that
 * follows the pointer field. Illegal with IMM set
 */
#define FIFOLDST_EXT_SHIFT	22
#define FIFOLDST_EXT_MASK	(1 << FIFOLDST_EXT_SHIFT)
#define FIFOLDST_EXT		(1 << FIFOLDST_EXT_SHIFT)

/* Input data type.*/
#define FIFOLD_TYPE_SHIFT	16
#define FIFOLD_CONT_TYPE_SHIFT	19 /* shift past last-flush bits */
#define FIFOLD_TYPE_MASK	(0x3f << FIFOLD_TYPE_SHIFT)

/* PK types */
#define FIFOLD_TYPE_PK		(0x00 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_MASK	(0x30 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_TYPEMASK	(0x0f << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_A0	(0x00 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_A1	(0x01 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_A2	(0x02 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_A3	(0x03 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_B0	(0x04 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_B1	(0x05 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_B2	(0x06 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_B3	(0x07 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_N	(0x08 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_A	(0x0c << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_B	(0x0d << FIFOLD_TYPE_SHIFT)

/* Other types. Need to OR in last/flush bits as desired */
#define FIFOLD_TYPE_MSG_MASK	(0x38 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_MSG		(0x10 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_MSG1OUT2	(0x18 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_IV		(0x20 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_BITDATA	(0x28 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_AAD		(0x30 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_ICV		(0x38 << FIFOLD_TYPE_SHIFT)

/* Last/Flush bits for use with "other" types above */
#define FIFOLD_TYPE_ACT_MASK	(0x07 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_NOACTION	(0x00 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_FLUSH1	(0x01 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LAST1	(0x02 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LAST2FLUSH	(0x03 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LAST2	(0x04 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LASTBOTH	(0x06 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LASTBOTHFL	(0x07 << FIFOLD_TYPE_SHIFT)

/* FIFO LOAD/STORE length field: 16-bit inline, 32-bit when EXT is used */
#define FIFOLDST_LEN_MASK	0xffff
#define FIFOLDST_EXT_LEN_MASK	0xffffffff
362
/* Output data types */
#define FIFOST_TYPE_SHIFT	16
#define FIFOST_TYPE_MASK	(0x3f << FIFOST_TYPE_SHIFT)

#define FIFOST_TYPE_PKHA_A0	 (0x00 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A1	 (0x01 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A2	 (0x02 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A3	 (0x03 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B0	 (0x04 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B1	 (0x05 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B2	 (0x06 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B3	 (0x07 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_N	 (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A	 (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
/*
 * NOTE(review): AF_SBOX JKEK/TKEK use 0x10/0x21, unlike the other
 * JKEK/TKEK pairs below which are consecutive (0x22/0x23, 0x24/0x25, ...).
 * Looks intentional per hardware encoding, but verify against the SEC4
 * reference manual before relying on any arithmetic relationship.
 */
#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK	 (0x22 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_TKEK	 (0x23 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_KEY_KEK	 (0x24 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_KEY_TKEK	 (0x25 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_SPLIT_KEK	 (0x26 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_SPLIT_TKEK	 (0x27 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_OUTFIFO_KEK	 (0x28 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGSTORE	 (0x34 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGFIFO	 (0x35 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_SKIP	 (0x3f << FIFOST_TYPE_SHIFT)
392
/*
 * OPERATION Command Constructs
 */

/* Operation type selectors - OP TYPE (3-bit field at bits 26:24) */
#define OP_TYPE_SHIFT		24
#define OP_TYPE_MASK		(0x07 << OP_TYPE_SHIFT)

#define OP_TYPE_UNI_PROTOCOL	(0x00 << OP_TYPE_SHIFT)
#define OP_TYPE_PK		(0x01 << OP_TYPE_SHIFT)
#define OP_TYPE_CLASS1_ALG	(0x02 << OP_TYPE_SHIFT)
#define OP_TYPE_CLASS2_ALG	(0x04 << OP_TYPE_SHIFT)
#define OP_TYPE_DECAP_PROTOCOL	(0x06 << OP_TYPE_SHIFT)
#define OP_TYPE_ENCAP_PROTOCOL	(0x07 << OP_TYPE_SHIFT)
407
/*
 * ProtocolID selectors - PROTID (8-bit field at bits 23:16).
 * Mask expressed via OP_PCLID_SHIFT (was a hard-coded "<< 16") so it
 * stays consistent with the shift define and every sibling mask here.
 */
#define OP_PCLID_SHIFT		16
#define OP_PCLID_MASK		(0xff << OP_PCLID_SHIFT)
411
/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
#define OP_PCLID_IKEV1_PRF	(0x01 << OP_PCLID_SHIFT)
#define OP_PCLID_IKEV2_PRF	(0x02 << OP_PCLID_SHIFT)
#define OP_PCLID_SSL30_PRF	(0x08 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS10_PRF	(0x09 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS11_PRF	(0x0a << OP_PCLID_SHIFT)
#define OP_PCLID_DTLS10_PRF	(0x0c << OP_PCLID_SHIFT)
#define OP_PCLID_PRF		(0x06 << OP_PCLID_SHIFT)
#define OP_PCLID_BLOB		(0x0d << OP_PCLID_SHIFT)
#define OP_PCLID_SECRETKEY	(0x11 << OP_PCLID_SHIFT)
#define OP_PCLID_PUBLICKEYPAIR	(0x14 << OP_PCLID_SHIFT)
#define OP_PCLID_DSASIGN	(0x15 << OP_PCLID_SHIFT)
#define OP_PCLID_DSAVERIFY	(0x16 << OP_PCLID_SHIFT)
425
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC		(0x01 << OP_PCLID_SHIFT)
#define OP_PCLID_SRTP		(0x02 << OP_PCLID_SHIFT)
#define OP_PCLID_MACSEC		(0x03 << OP_PCLID_SHIFT)
#define OP_PCLID_WIFI		(0x04 << OP_PCLID_SHIFT)
#define OP_PCLID_WIMAX		(0x05 << OP_PCLID_SHIFT)
#define OP_PCLID_SSL30		(0x08 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS10		(0x09 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS11		(0x0a << OP_PCLID_SHIFT)
#define OP_PCLID_TLS12		(0x0b << OP_PCLID_SHIFT)
#define OP_PCLID_DTLS		(0x0c << OP_PCLID_SHIFT)
437
438/*
439 * ProtocolInfo selectors
440 */
441#define OP_PCLINFO_MASK 0xffff
442
443/* for OP_PCLID_IPSEC */
444#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
445#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
446
447#define OP_PCL_IPSEC_DES_IV64 0x0100
448#define OP_PCL_IPSEC_DES 0x0200
449#define OP_PCL_IPSEC_3DES 0x0300
450#define OP_PCL_IPSEC_AES_CBC 0x0c00
451#define OP_PCL_IPSEC_AES_CTR 0x0d00
452#define OP_PCL_IPSEC_AES_XTS 0x1600
453#define OP_PCL_IPSEC_AES_CCM8 0x0e00
454#define OP_PCL_IPSEC_AES_CCM12 0x0f00
455#define OP_PCL_IPSEC_AES_CCM16 0x1000
456#define OP_PCL_IPSEC_AES_GCM8 0x1200
457#define OP_PCL_IPSEC_AES_GCM12 0x1300
458#define OP_PCL_IPSEC_AES_GCM16 0x1400
459
460#define OP_PCL_IPSEC_HMAC_NULL 0x0000
461#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
462#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
463#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
464#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
465#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
466#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
467#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
468#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
469
/* For SRTP - OP_PCLID_SRTP: upper byte = cipher, lower byte = auth */
#define OP_PCL_SRTP_CIPHER_MASK			 0xff00
#define OP_PCL_SRTP_AUTH_MASK			 0x00ff

#define OP_PCL_SRTP_AES_CTR			 0x0d00

#define OP_PCL_SRTP_HMAC_SHA1_160		 0x0007
477
/*
 * For SSL 3.0 - OP_PCLID_SSL30.
 * Values appear to mirror IANA TLS ciphersuite numbers; the _2.._N
 * suffixed aliases map different suites to the same cipher/MAC combo.
 */
#define OP_PCL_SSL30_AES_128_CBC_SHA		 0x002f
#define OP_PCL_SSL30_AES_128_CBC_SHA_2		 0x0030
#define OP_PCL_SSL30_AES_128_CBC_SHA_3		 0x0031
#define OP_PCL_SSL30_AES_128_CBC_SHA_4		 0x0032
#define OP_PCL_SSL30_AES_128_CBC_SHA_5		 0x0033
#define OP_PCL_SSL30_AES_128_CBC_SHA_6		 0x0034
#define OP_PCL_SSL30_AES_128_CBC_SHA_7		 0x008c
#define OP_PCL_SSL30_AES_128_CBC_SHA_8		 0x0090
#define OP_PCL_SSL30_AES_128_CBC_SHA_9		 0x0094
#define OP_PCL_SSL30_AES_128_CBC_SHA_10		 0xc004
#define OP_PCL_SSL30_AES_128_CBC_SHA_11		 0xc009
#define OP_PCL_SSL30_AES_128_CBC_SHA_12		 0xc00e
#define OP_PCL_SSL30_AES_128_CBC_SHA_13		 0xc013
#define OP_PCL_SSL30_AES_128_CBC_SHA_14		 0xc018
#define OP_PCL_SSL30_AES_128_CBC_SHA_15		 0xc01d
#define OP_PCL_SSL30_AES_128_CBC_SHA_16		 0xc01e
#define OP_PCL_SSL30_AES_128_CBC_SHA_17		 0xc01f

#define OP_PCL_SSL30_AES_256_CBC_SHA		 0x0035
#define OP_PCL_SSL30_AES_256_CBC_SHA_2		 0x0036
#define OP_PCL_SSL30_AES_256_CBC_SHA_3		 0x0037
#define OP_PCL_SSL30_AES_256_CBC_SHA_4		 0x0038
#define OP_PCL_SSL30_AES_256_CBC_SHA_5		 0x0039
#define OP_PCL_SSL30_AES_256_CBC_SHA_6		 0x003a
#define OP_PCL_SSL30_AES_256_CBC_SHA_7		 0x008d
#define OP_PCL_SSL30_AES_256_CBC_SHA_8		 0x0091
#define OP_PCL_SSL30_AES_256_CBC_SHA_9		 0x0095
#define OP_PCL_SSL30_AES_256_CBC_SHA_10		 0xc005
#define OP_PCL_SSL30_AES_256_CBC_SHA_11		 0xc00a
#define OP_PCL_SSL30_AES_256_CBC_SHA_12		 0xc00f
#define OP_PCL_SSL30_AES_256_CBC_SHA_13		 0xc014
#define OP_PCL_SSL30_AES_256_CBC_SHA_14		 0xc019
#define OP_PCL_SSL30_AES_256_CBC_SHA_15		 0xc020
#define OP_PCL_SSL30_AES_256_CBC_SHA_16		 0xc021
#define OP_PCL_SSL30_AES_256_CBC_SHA_17		 0xc022

#define OP_PCL_SSL30_3DES_EDE_CBC_MD5		 0x0023

#define OP_PCL_SSL30_3DES_EDE_CBC_SHA		 0x001f
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2		 0x008b
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3		 0x008f
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4		 0x0093
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5		 0x000a
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6		 0x000d
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7		 0x0010
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8		 0x0013
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9		 0x0016
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10	 0x001b
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11	 0xc003
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12	 0xc008
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13	 0xc00d
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14	 0xc012
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15	 0xc017
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16	 0xc01a
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17	 0xc01b
#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18	 0xc01c

#define OP_PCL_SSL30_DES40_CBC_MD5		 0x0029

#define OP_PCL_SSL30_DES_CBC_MD5		 0x0022

#define OP_PCL_SSL30_DES40_CBC_SHA		 0x0008
#define OP_PCL_SSL30_DES40_CBC_SHA_2		 0x000b
#define OP_PCL_SSL30_DES40_CBC_SHA_3		 0x000e
#define OP_PCL_SSL30_DES40_CBC_SHA_4		 0x0011
#define OP_PCL_SSL30_DES40_CBC_SHA_5		 0x0014
#define OP_PCL_SSL30_DES40_CBC_SHA_6		 0x0019
#define OP_PCL_SSL30_DES40_CBC_SHA_7		 0x0026

#define OP_PCL_SSL30_DES_CBC_SHA		 0x001e
#define OP_PCL_SSL30_DES_CBC_SHA_2		 0x0009
#define OP_PCL_SSL30_DES_CBC_SHA_3		 0x000c
#define OP_PCL_SSL30_DES_CBC_SHA_4		 0x000f
#define OP_PCL_SSL30_DES_CBC_SHA_5		 0x0012
#define OP_PCL_SSL30_DES_CBC_SHA_6		 0x0015
#define OP_PCL_SSL30_DES_CBC_SHA_7		 0x001a

#define OP_PCL_SSL30_RC4_128_MD5		 0x0024
#define OP_PCL_SSL30_RC4_128_MD5_2		 0x0004
#define OP_PCL_SSL30_RC4_128_MD5_3		 0x0018

#define OP_PCL_SSL30_RC4_40_MD5			 0x002b
#define OP_PCL_SSL30_RC4_40_MD5_2		 0x0003
#define OP_PCL_SSL30_RC4_40_MD5_3		 0x0017

#define OP_PCL_SSL30_RC4_128_SHA		 0x0020
#define OP_PCL_SSL30_RC4_128_SHA_2		 0x008a
#define OP_PCL_SSL30_RC4_128_SHA_3		 0x008e
#define OP_PCL_SSL30_RC4_128_SHA_4		 0x0092
#define OP_PCL_SSL30_RC4_128_SHA_5		 0x0005
#define OP_PCL_SSL30_RC4_128_SHA_6		 0xc002
#define OP_PCL_SSL30_RC4_128_SHA_7		 0xc007
#define OP_PCL_SSL30_RC4_128_SHA_8		 0xc00c
#define OP_PCL_SSL30_RC4_128_SHA_9		 0xc011
#define OP_PCL_SSL30_RC4_128_SHA_10		 0xc016

#define OP_PCL_SSL30_RC4_40_SHA			 0x0028
576
577
/*
 * For TLS 1.0 - OP_PCLID_TLS10.
 * 0x00xx/0xc0xx values appear to mirror IANA TLS ciphersuite numbers;
 * the 0xffxx values at the end look like proprietary extensions.
 */
#define OP_PCL_TLS10_AES_128_CBC_SHA		 0x002f
#define OP_PCL_TLS10_AES_128_CBC_SHA_2		 0x0030
#define OP_PCL_TLS10_AES_128_CBC_SHA_3		 0x0031
#define OP_PCL_TLS10_AES_128_CBC_SHA_4		 0x0032
#define OP_PCL_TLS10_AES_128_CBC_SHA_5		 0x0033
#define OP_PCL_TLS10_AES_128_CBC_SHA_6		 0x0034
#define OP_PCL_TLS10_AES_128_CBC_SHA_7		 0x008c
#define OP_PCL_TLS10_AES_128_CBC_SHA_8		 0x0090
#define OP_PCL_TLS10_AES_128_CBC_SHA_9		 0x0094
#define OP_PCL_TLS10_AES_128_CBC_SHA_10		 0xc004
#define OP_PCL_TLS10_AES_128_CBC_SHA_11		 0xc009
#define OP_PCL_TLS10_AES_128_CBC_SHA_12		 0xc00e
#define OP_PCL_TLS10_AES_128_CBC_SHA_13		 0xc013
#define OP_PCL_TLS10_AES_128_CBC_SHA_14		 0xc018
#define OP_PCL_TLS10_AES_128_CBC_SHA_15		 0xc01d
#define OP_PCL_TLS10_AES_128_CBC_SHA_16		 0xc01e
#define OP_PCL_TLS10_AES_128_CBC_SHA_17		 0xc01f

#define OP_PCL_TLS10_AES_256_CBC_SHA		 0x0035
#define OP_PCL_TLS10_AES_256_CBC_SHA_2		 0x0036
#define OP_PCL_TLS10_AES_256_CBC_SHA_3		 0x0037
#define OP_PCL_TLS10_AES_256_CBC_SHA_4		 0x0038
#define OP_PCL_TLS10_AES_256_CBC_SHA_5		 0x0039
#define OP_PCL_TLS10_AES_256_CBC_SHA_6		 0x003a
#define OP_PCL_TLS10_AES_256_CBC_SHA_7		 0x008d
#define OP_PCL_TLS10_AES_256_CBC_SHA_8		 0x0091
#define OP_PCL_TLS10_AES_256_CBC_SHA_9		 0x0095
#define OP_PCL_TLS10_AES_256_CBC_SHA_10		 0xc005
#define OP_PCL_TLS10_AES_256_CBC_SHA_11		 0xc00a
#define OP_PCL_TLS10_AES_256_CBC_SHA_12		 0xc00f
#define OP_PCL_TLS10_AES_256_CBC_SHA_13		 0xc014
#define OP_PCL_TLS10_AES_256_CBC_SHA_14		 0xc019
#define OP_PCL_TLS10_AES_256_CBC_SHA_15		 0xc020
#define OP_PCL_TLS10_AES_256_CBC_SHA_16		 0xc021
#define OP_PCL_TLS10_AES_256_CBC_SHA_17		 0xc022

/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5	 0x0023 */

#define OP_PCL_TLS10_3DES_EDE_CBC_SHA		 0x001f
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2		 0x008b
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3		 0x008f
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4		 0x0093
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5		 0x000a
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6		 0x000d
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7		 0x0010
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8		 0x0013
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9		 0x0016
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10	 0x001b
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11	 0xc003
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12	 0xc008
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13	 0xc00d
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14	 0xc012
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15	 0xc017
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16	 0xc01a
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17	 0xc01b
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18	 0xc01c

#define OP_PCL_TLS10_DES40_CBC_MD5		 0x0029

#define OP_PCL_TLS10_DES_CBC_MD5		 0x0022

#define OP_PCL_TLS10_DES40_CBC_SHA		 0x0008
#define OP_PCL_TLS10_DES40_CBC_SHA_2		 0x000b
#define OP_PCL_TLS10_DES40_CBC_SHA_3		 0x000e
#define OP_PCL_TLS10_DES40_CBC_SHA_4		 0x0011
#define OP_PCL_TLS10_DES40_CBC_SHA_5		 0x0014
#define OP_PCL_TLS10_DES40_CBC_SHA_6		 0x0019
#define OP_PCL_TLS10_DES40_CBC_SHA_7		 0x0026


#define OP_PCL_TLS10_DES_CBC_SHA		 0x001e
#define OP_PCL_TLS10_DES_CBC_SHA_2		 0x0009
#define OP_PCL_TLS10_DES_CBC_SHA_3		 0x000c
#define OP_PCL_TLS10_DES_CBC_SHA_4		 0x000f
#define OP_PCL_TLS10_DES_CBC_SHA_5		 0x0012
#define OP_PCL_TLS10_DES_CBC_SHA_6		 0x0015
#define OP_PCL_TLS10_DES_CBC_SHA_7		 0x001a

#define OP_PCL_TLS10_RC4_128_MD5		 0x0024
#define OP_PCL_TLS10_RC4_128_MD5_2		 0x0004
#define OP_PCL_TLS10_RC4_128_MD5_3		 0x0018

#define OP_PCL_TLS10_RC4_40_MD5			 0x002b
#define OP_PCL_TLS10_RC4_40_MD5_2		 0x0003
#define OP_PCL_TLS10_RC4_40_MD5_3		 0x0017

#define OP_PCL_TLS10_RC4_128_SHA		 0x0020
#define OP_PCL_TLS10_RC4_128_SHA_2		 0x008a
#define OP_PCL_TLS10_RC4_128_SHA_3		 0x008e
#define OP_PCL_TLS10_RC4_128_SHA_4		 0x0092
#define OP_PCL_TLS10_RC4_128_SHA_5		 0x0005
#define OP_PCL_TLS10_RC4_128_SHA_6		 0xc002
#define OP_PCL_TLS10_RC4_128_SHA_7		 0xc007
#define OP_PCL_TLS10_RC4_128_SHA_8		 0xc00c
#define OP_PCL_TLS10_RC4_128_SHA_9		 0xc011
#define OP_PCL_TLS10_RC4_128_SHA_10		 0xc016

#define OP_PCL_TLS10_RC4_40_SHA			 0x0028

/*
 * NOTE(review): 0xff23 is used both for 3DES_EDE_CBC_MD5 and
 * AES_192_CBC_SHA384 below — verify against the SEC4 reference manual.
 */
#define OP_PCL_TLS10_3DES_EDE_CBC_MD5		 0xff23
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160	 0xff30
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224	 0xff34
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256	 0xff36
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384	 0xff33
#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512	 0xff35
#define OP_PCL_TLS10_AES_128_CBC_SHA160		 0xff80
#define OP_PCL_TLS10_AES_128_CBC_SHA224		 0xff84
#define OP_PCL_TLS10_AES_128_CBC_SHA256		 0xff86
#define OP_PCL_TLS10_AES_128_CBC_SHA384		 0xff83
#define OP_PCL_TLS10_AES_128_CBC_SHA512		 0xff85
#define OP_PCL_TLS10_AES_192_CBC_SHA160		 0xff20
#define OP_PCL_TLS10_AES_192_CBC_SHA224		 0xff24
#define OP_PCL_TLS10_AES_192_CBC_SHA256		 0xff26
#define OP_PCL_TLS10_AES_192_CBC_SHA384		 0xff23
#define OP_PCL_TLS10_AES_192_CBC_SHA512		 0xff25
#define OP_PCL_TLS10_AES_256_CBC_SHA160		 0xff60
#define OP_PCL_TLS10_AES_256_CBC_SHA224		 0xff64
#define OP_PCL_TLS10_AES_256_CBC_SHA256		 0xff66
#define OP_PCL_TLS10_AES_256_CBC_SHA384		 0xff63
#define OP_PCL_TLS10_AES_256_CBC_SHA512		 0xff65
699
700
701
/* For TLS 1.1 - OP_PCLID_TLS11 (values parallel the TLS 1.0 table above) */
#define OP_PCL_TLS11_AES_128_CBC_SHA		 0x002f
#define OP_PCL_TLS11_AES_128_CBC_SHA_2		 0x0030
#define OP_PCL_TLS11_AES_128_CBC_SHA_3		 0x0031
#define OP_PCL_TLS11_AES_128_CBC_SHA_4		 0x0032
#define OP_PCL_TLS11_AES_128_CBC_SHA_5		 0x0033
#define OP_PCL_TLS11_AES_128_CBC_SHA_6		 0x0034
#define OP_PCL_TLS11_AES_128_CBC_SHA_7		 0x008c
#define OP_PCL_TLS11_AES_128_CBC_SHA_8		 0x0090
#define OP_PCL_TLS11_AES_128_CBC_SHA_9		 0x0094
#define OP_PCL_TLS11_AES_128_CBC_SHA_10		 0xc004
#define OP_PCL_TLS11_AES_128_CBC_SHA_11		 0xc009
#define OP_PCL_TLS11_AES_128_CBC_SHA_12		 0xc00e
#define OP_PCL_TLS11_AES_128_CBC_SHA_13		 0xc013
#define OP_PCL_TLS11_AES_128_CBC_SHA_14		 0xc018
#define OP_PCL_TLS11_AES_128_CBC_SHA_15		 0xc01d
#define OP_PCL_TLS11_AES_128_CBC_SHA_16		 0xc01e
#define OP_PCL_TLS11_AES_128_CBC_SHA_17		 0xc01f

#define OP_PCL_TLS11_AES_256_CBC_SHA		 0x0035
#define OP_PCL_TLS11_AES_256_CBC_SHA_2		 0x0036
#define OP_PCL_TLS11_AES_256_CBC_SHA_3		 0x0037
#define OP_PCL_TLS11_AES_256_CBC_SHA_4		 0x0038
#define OP_PCL_TLS11_AES_256_CBC_SHA_5		 0x0039
#define OP_PCL_TLS11_AES_256_CBC_SHA_6		 0x003a
#define OP_PCL_TLS11_AES_256_CBC_SHA_7		 0x008d
#define OP_PCL_TLS11_AES_256_CBC_SHA_8		 0x0091
#define OP_PCL_TLS11_AES_256_CBC_SHA_9		 0x0095
#define OP_PCL_TLS11_AES_256_CBC_SHA_10		 0xc005
#define OP_PCL_TLS11_AES_256_CBC_SHA_11		 0xc00a
#define OP_PCL_TLS11_AES_256_CBC_SHA_12		 0xc00f
#define OP_PCL_TLS11_AES_256_CBC_SHA_13		 0xc014
#define OP_PCL_TLS11_AES_256_CBC_SHA_14		 0xc019
#define OP_PCL_TLS11_AES_256_CBC_SHA_15		 0xc020
#define OP_PCL_TLS11_AES_256_CBC_SHA_16		 0xc021
#define OP_PCL_TLS11_AES_256_CBC_SHA_17		 0xc022

/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5	 0x0023 */

#define OP_PCL_TLS11_3DES_EDE_CBC_SHA		 0x001f
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2		 0x008b
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3		 0x008f
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4		 0x0093
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5		 0x000a
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6		 0x000d
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7		 0x0010
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8		 0x0013
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9		 0x0016
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10	 0x001b
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11	 0xc003
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12	 0xc008
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13	 0xc00d
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14	 0xc012
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15	 0xc017
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16	 0xc01a
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17	 0xc01b
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18	 0xc01c

#define OP_PCL_TLS11_DES40_CBC_MD5		 0x0029

#define OP_PCL_TLS11_DES_CBC_MD5		 0x0022

#define OP_PCL_TLS11_DES40_CBC_SHA		 0x0008
#define OP_PCL_TLS11_DES40_CBC_SHA_2		 0x000b
#define OP_PCL_TLS11_DES40_CBC_SHA_3		 0x000e
#define OP_PCL_TLS11_DES40_CBC_SHA_4		 0x0011
#define OP_PCL_TLS11_DES40_CBC_SHA_5		 0x0014
#define OP_PCL_TLS11_DES40_CBC_SHA_6		 0x0019
#define OP_PCL_TLS11_DES40_CBC_SHA_7		 0x0026

#define OP_PCL_TLS11_DES_CBC_SHA		 0x001e
#define OP_PCL_TLS11_DES_CBC_SHA_2		 0x0009
#define OP_PCL_TLS11_DES_CBC_SHA_3		 0x000c
#define OP_PCL_TLS11_DES_CBC_SHA_4		 0x000f
#define OP_PCL_TLS11_DES_CBC_SHA_5		 0x0012
#define OP_PCL_TLS11_DES_CBC_SHA_6		 0x0015
#define OP_PCL_TLS11_DES_CBC_SHA_7		 0x001a

#define OP_PCL_TLS11_RC4_128_MD5		 0x0024
#define OP_PCL_TLS11_RC4_128_MD5_2		 0x0004
#define OP_PCL_TLS11_RC4_128_MD5_3		 0x0018

#define OP_PCL_TLS11_RC4_40_MD5			 0x002b
#define OP_PCL_TLS11_RC4_40_MD5_2		 0x0003
#define OP_PCL_TLS11_RC4_40_MD5_3		 0x0017

#define OP_PCL_TLS11_RC4_128_SHA		 0x0020
#define OP_PCL_TLS11_RC4_128_SHA_2		 0x008a
#define OP_PCL_TLS11_RC4_128_SHA_3		 0x008e
#define OP_PCL_TLS11_RC4_128_SHA_4		 0x0092
#define OP_PCL_TLS11_RC4_128_SHA_5		 0x0005
#define OP_PCL_TLS11_RC4_128_SHA_6		 0xc002
#define OP_PCL_TLS11_RC4_128_SHA_7		 0xc007
#define OP_PCL_TLS11_RC4_128_SHA_8		 0xc00c
#define OP_PCL_TLS11_RC4_128_SHA_9		 0xc011
#define OP_PCL_TLS11_RC4_128_SHA_10		 0xc016

#define OP_PCL_TLS11_RC4_40_SHA			 0x0028

/* 0xffxx values: apparently proprietary extensions — see TLS 1.0 table note */
#define OP_PCL_TLS11_3DES_EDE_CBC_MD5		 0xff23
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160	 0xff30
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224	 0xff34
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256	 0xff36
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384	 0xff33
#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512	 0xff35
#define OP_PCL_TLS11_AES_128_CBC_SHA160		 0xff80
#define OP_PCL_TLS11_AES_128_CBC_SHA224		 0xff84
#define OP_PCL_TLS11_AES_128_CBC_SHA256		 0xff86
#define OP_PCL_TLS11_AES_128_CBC_SHA384		 0xff83
#define OP_PCL_TLS11_AES_128_CBC_SHA512		 0xff85
#define OP_PCL_TLS11_AES_192_CBC_SHA160		 0xff20
#define OP_PCL_TLS11_AES_192_CBC_SHA224		 0xff24
#define OP_PCL_TLS11_AES_192_CBC_SHA256		 0xff26
#define OP_PCL_TLS11_AES_192_CBC_SHA384		 0xff23
#define OP_PCL_TLS11_AES_192_CBC_SHA512		 0xff25
#define OP_PCL_TLS11_AES_256_CBC_SHA160		 0xff60
#define OP_PCL_TLS11_AES_256_CBC_SHA224		 0xff64
#define OP_PCL_TLS11_AES_256_CBC_SHA256		 0xff66
#define OP_PCL_TLS11_AES_256_CBC_SHA384		 0xff63
#define OP_PCL_TLS11_AES_256_CBC_SHA512		 0xff65
822
823
/* For TLS 1.2 - OP_PCLID_TLS12 (values parallel the TLS 1.0 table above) */
#define OP_PCL_TLS12_AES_128_CBC_SHA		 0x002f
#define OP_PCL_TLS12_AES_128_CBC_SHA_2		 0x0030
#define OP_PCL_TLS12_AES_128_CBC_SHA_3		 0x0031
#define OP_PCL_TLS12_AES_128_CBC_SHA_4		 0x0032
#define OP_PCL_TLS12_AES_128_CBC_SHA_5		 0x0033
#define OP_PCL_TLS12_AES_128_CBC_SHA_6		 0x0034
#define OP_PCL_TLS12_AES_128_CBC_SHA_7		 0x008c
#define OP_PCL_TLS12_AES_128_CBC_SHA_8		 0x0090
#define OP_PCL_TLS12_AES_128_CBC_SHA_9		 0x0094
#define OP_PCL_TLS12_AES_128_CBC_SHA_10		 0xc004
#define OP_PCL_TLS12_AES_128_CBC_SHA_11		 0xc009
#define OP_PCL_TLS12_AES_128_CBC_SHA_12		 0xc00e
#define OP_PCL_TLS12_AES_128_CBC_SHA_13		 0xc013
#define OP_PCL_TLS12_AES_128_CBC_SHA_14		 0xc018
#define OP_PCL_TLS12_AES_128_CBC_SHA_15		 0xc01d
#define OP_PCL_TLS12_AES_128_CBC_SHA_16		 0xc01e
#define OP_PCL_TLS12_AES_128_CBC_SHA_17		 0xc01f

#define OP_PCL_TLS12_AES_256_CBC_SHA		 0x0035
#define OP_PCL_TLS12_AES_256_CBC_SHA_2		 0x0036
#define OP_PCL_TLS12_AES_256_CBC_SHA_3		 0x0037
#define OP_PCL_TLS12_AES_256_CBC_SHA_4		 0x0038
#define OP_PCL_TLS12_AES_256_CBC_SHA_5		 0x0039
#define OP_PCL_TLS12_AES_256_CBC_SHA_6		 0x003a
#define OP_PCL_TLS12_AES_256_CBC_SHA_7		 0x008d
#define OP_PCL_TLS12_AES_256_CBC_SHA_8		 0x0091
#define OP_PCL_TLS12_AES_256_CBC_SHA_9		 0x0095
#define OP_PCL_TLS12_AES_256_CBC_SHA_10		 0xc005
#define OP_PCL_TLS12_AES_256_CBC_SHA_11		 0xc00a
#define OP_PCL_TLS12_AES_256_CBC_SHA_12		 0xc00f
#define OP_PCL_TLS12_AES_256_CBC_SHA_13		 0xc014
#define OP_PCL_TLS12_AES_256_CBC_SHA_14		 0xc019
#define OP_PCL_TLS12_AES_256_CBC_SHA_15		 0xc020
#define OP_PCL_TLS12_AES_256_CBC_SHA_16		 0xc021
#define OP_PCL_TLS12_AES_256_CBC_SHA_17		 0xc022

/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5	 0x0023 */

#define OP_PCL_TLS12_3DES_EDE_CBC_SHA		 0x001f
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2		 0x008b
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3		 0x008f
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4		 0x0093
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5		 0x000a
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6		 0x000d
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7		 0x0010
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8		 0x0013
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9		 0x0016
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10	 0x001b
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11	 0xc003
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12	 0xc008
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13	 0xc00d
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14	 0xc012
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15	 0xc017
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16	 0xc01a
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17	 0xc01b
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18	 0xc01c

#define OP_PCL_TLS12_DES40_CBC_MD5		 0x0029

#define OP_PCL_TLS12_DES_CBC_MD5		 0x0022

#define OP_PCL_TLS12_DES40_CBC_SHA		 0x0008
#define OP_PCL_TLS12_DES40_CBC_SHA_2		 0x000b
#define OP_PCL_TLS12_DES40_CBC_SHA_3		 0x000e
#define OP_PCL_TLS12_DES40_CBC_SHA_4		 0x0011
#define OP_PCL_TLS12_DES40_CBC_SHA_5		 0x0014
#define OP_PCL_TLS12_DES40_CBC_SHA_6		 0x0019
#define OP_PCL_TLS12_DES40_CBC_SHA_7		 0x0026

#define OP_PCL_TLS12_DES_CBC_SHA		 0x001e
#define OP_PCL_TLS12_DES_CBC_SHA_2		 0x0009
#define OP_PCL_TLS12_DES_CBC_SHA_3		 0x000c
#define OP_PCL_TLS12_DES_CBC_SHA_4		 0x000f
#define OP_PCL_TLS12_DES_CBC_SHA_5		 0x0012
#define OP_PCL_TLS12_DES_CBC_SHA_6		 0x0015
#define OP_PCL_TLS12_DES_CBC_SHA_7		 0x001a

#define OP_PCL_TLS12_RC4_128_MD5		 0x0024
#define OP_PCL_TLS12_RC4_128_MD5_2		 0x0004
#define OP_PCL_TLS12_RC4_128_MD5_3		 0x0018

#define OP_PCL_TLS12_RC4_40_MD5			 0x002b
#define OP_PCL_TLS12_RC4_40_MD5_2		 0x0003
#define OP_PCL_TLS12_RC4_40_MD5_3		 0x0017

#define OP_PCL_TLS12_RC4_128_SHA		 0x0020
#define OP_PCL_TLS12_RC4_128_SHA_2		 0x008a
#define OP_PCL_TLS12_RC4_128_SHA_3		 0x008e
#define OP_PCL_TLS12_RC4_128_SHA_4		 0x0092
#define OP_PCL_TLS12_RC4_128_SHA_5		 0x0005
#define OP_PCL_TLS12_RC4_128_SHA_6		 0xc002
#define OP_PCL_TLS12_RC4_128_SHA_7		 0xc007
#define OP_PCL_TLS12_RC4_128_SHA_8		 0xc00c
#define OP_PCL_TLS12_RC4_128_SHA_9		 0xc011
#define OP_PCL_TLS12_RC4_128_SHA_10		 0xc016

#define OP_PCL_TLS12_RC4_40_SHA			 0x0028

/* #define OP_PCL_TLS12_AES_128_CBC_SHA256	 0x003c */
#define OP_PCL_TLS12_AES_128_CBC_SHA256_2	 0x003e
#define OP_PCL_TLS12_AES_128_CBC_SHA256_3	 0x003f
#define OP_PCL_TLS12_AES_128_CBC_SHA256_4	 0x0040
#define OP_PCL_TLS12_AES_128_CBC_SHA256_5	 0x0067
#define OP_PCL_TLS12_AES_128_CBC_SHA256_6	 0x006c

/* #define OP_PCL_TLS12_AES_256_CBC_SHA256	 0x003d */
#define OP_PCL_TLS12_AES_256_CBC_SHA256_2	 0x0068
#define OP_PCL_TLS12_AES_256_CBC_SHA256_3	 0x0069
#define OP_PCL_TLS12_AES_256_CBC_SHA256_4	 0x006a
#define OP_PCL_TLS12_AES_256_CBC_SHA256_5	 0x006b
#define OP_PCL_TLS12_AES_256_CBC_SHA256_6	 0x006d

/* AEAD_AES_xxx_CCM/GCM remain to be defined... */

/* 0xffxx values: apparently proprietary extensions — see TLS 1.0 table note */
#define OP_PCL_TLS12_3DES_EDE_CBC_MD5		 0xff23
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160	 0xff30
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224	 0xff34
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256	 0xff36
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384	 0xff33
#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512	 0xff35
#define OP_PCL_TLS12_AES_128_CBC_SHA160		 0xff80
#define OP_PCL_TLS12_AES_128_CBC_SHA224		 0xff84
#define OP_PCL_TLS12_AES_128_CBC_SHA256		 0xff86
#define OP_PCL_TLS12_AES_128_CBC_SHA384		 0xff83
#define OP_PCL_TLS12_AES_128_CBC_SHA512		 0xff85
#define OP_PCL_TLS12_AES_192_CBC_SHA160		 0xff20
#define OP_PCL_TLS12_AES_192_CBC_SHA224		 0xff24
#define OP_PCL_TLS12_AES_192_CBC_SHA256		 0xff26
#define OP_PCL_TLS12_AES_192_CBC_SHA384		 0xff23
#define OP_PCL_TLS12_AES_192_CBC_SHA512		 0xff25
#define OP_PCL_TLS12_AES_256_CBC_SHA160		 0xff60
#define OP_PCL_TLS12_AES_256_CBC_SHA224		 0xff64
#define OP_PCL_TLS12_AES_256_CBC_SHA256		 0xff66
#define OP_PCL_TLS12_AES_256_CBC_SHA384		 0xff63
#define OP_PCL_TLS12_AES_256_CBC_SHA512		 0xff65
960
/* For DTLS - OP_PCLID_DTLS (values parallel the TLS 1.0 table above) */

#define OP_PCL_DTLS_AES_128_CBC_SHA		 0x002f
#define OP_PCL_DTLS_AES_128_CBC_SHA_2		 0x0030
#define OP_PCL_DTLS_AES_128_CBC_SHA_3		 0x0031
#define OP_PCL_DTLS_AES_128_CBC_SHA_4		 0x0032
#define OP_PCL_DTLS_AES_128_CBC_SHA_5		 0x0033
#define OP_PCL_DTLS_AES_128_CBC_SHA_6		 0x0034
#define OP_PCL_DTLS_AES_128_CBC_SHA_7		 0x008c
#define OP_PCL_DTLS_AES_128_CBC_SHA_8		 0x0090
#define OP_PCL_DTLS_AES_128_CBC_SHA_9		 0x0094
#define OP_PCL_DTLS_AES_128_CBC_SHA_10		 0xc004
#define OP_PCL_DTLS_AES_128_CBC_SHA_11		 0xc009
#define OP_PCL_DTLS_AES_128_CBC_SHA_12		 0xc00e
#define OP_PCL_DTLS_AES_128_CBC_SHA_13		 0xc013
#define OP_PCL_DTLS_AES_128_CBC_SHA_14		 0xc018
#define OP_PCL_DTLS_AES_128_CBC_SHA_15		 0xc01d
#define OP_PCL_DTLS_AES_128_CBC_SHA_16		 0xc01e
#define OP_PCL_DTLS_AES_128_CBC_SHA_17		 0xc01f

#define OP_PCL_DTLS_AES_256_CBC_SHA		 0x0035
#define OP_PCL_DTLS_AES_256_CBC_SHA_2		 0x0036
#define OP_PCL_DTLS_AES_256_CBC_SHA_3		 0x0037
#define OP_PCL_DTLS_AES_256_CBC_SHA_4		 0x0038
#define OP_PCL_DTLS_AES_256_CBC_SHA_5		 0x0039
#define OP_PCL_DTLS_AES_256_CBC_SHA_6		 0x003a
#define OP_PCL_DTLS_AES_256_CBC_SHA_7		 0x008d
#define OP_PCL_DTLS_AES_256_CBC_SHA_8		 0x0091
#define OP_PCL_DTLS_AES_256_CBC_SHA_9		 0x0095
#define OP_PCL_DTLS_AES_256_CBC_SHA_10		 0xc005
#define OP_PCL_DTLS_AES_256_CBC_SHA_11		 0xc00a
#define OP_PCL_DTLS_AES_256_CBC_SHA_12		 0xc00f
#define OP_PCL_DTLS_AES_256_CBC_SHA_13		 0xc014
#define OP_PCL_DTLS_AES_256_CBC_SHA_14		 0xc019
#define OP_PCL_DTLS_AES_256_CBC_SHA_15		 0xc020
#define OP_PCL_DTLS_AES_256_CBC_SHA_16		 0xc021
#define OP_PCL_DTLS_AES_256_CBC_SHA_17		 0xc022

/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5	 0x0023 */

#define OP_PCL_DTLS_3DES_EDE_CBC_SHA		 0x001f
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2		 0x008b
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3		 0x008f
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4		 0x0093
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5		 0x000a
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6		 0x000d
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7		 0x0010
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8		 0x0013
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9		 0x0016
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10		 0x001b
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11		 0xc003
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12		 0xc008
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13		 0xc00d
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14		 0xc012
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15		 0xc017
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16		 0xc01a
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17		 0xc01b
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18		 0xc01c

#define OP_PCL_DTLS_DES40_CBC_MD5		 0x0029

#define OP_PCL_DTLS_DES_CBC_MD5			 0x0022

#define OP_PCL_DTLS_DES40_CBC_SHA		 0x0008
#define OP_PCL_DTLS_DES40_CBC_SHA_2		 0x000b
#define OP_PCL_DTLS_DES40_CBC_SHA_3		 0x000e
#define OP_PCL_DTLS_DES40_CBC_SHA_4		 0x0011
#define OP_PCL_DTLS_DES40_CBC_SHA_5		 0x0014
#define OP_PCL_DTLS_DES40_CBC_SHA_6		 0x0019
#define OP_PCL_DTLS_DES40_CBC_SHA_7		 0x0026


#define OP_PCL_DTLS_DES_CBC_SHA			 0x001e
#define OP_PCL_DTLS_DES_CBC_SHA_2		 0x0009
#define OP_PCL_DTLS_DES_CBC_SHA_3		 0x000c
#define OP_PCL_DTLS_DES_CBC_SHA_4		 0x000f
#define OP_PCL_DTLS_DES_CBC_SHA_5		 0x0012
#define OP_PCL_DTLS_DES_CBC_SHA_6		 0x0015
#define OP_PCL_DTLS_DES_CBC_SHA_7		 0x001a


/* 0xffxx values: apparently proprietary extensions — see TLS 1.0 table note */
#define OP_PCL_DTLS_3DES_EDE_CBC_MD5		 0xff23
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160		 0xff30
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224		 0xff34
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256		 0xff36
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384		 0xff33
#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512		 0xff35
#define OP_PCL_DTLS_AES_128_CBC_SHA160		 0xff80
#define OP_PCL_DTLS_AES_128_CBC_SHA224		 0xff84
#define OP_PCL_DTLS_AES_128_CBC_SHA256		 0xff86
#define OP_PCL_DTLS_AES_128_CBC_SHA384		 0xff83
#define OP_PCL_DTLS_AES_128_CBC_SHA512		 0xff85
#define OP_PCL_DTLS_AES_192_CBC_SHA160		 0xff20
#define OP_PCL_DTLS_AES_192_CBC_SHA224		 0xff24
#define OP_PCL_DTLS_AES_192_CBC_SHA256		 0xff26
#define OP_PCL_DTLS_AES_192_CBC_SHA384		 0xff23
#define OP_PCL_DTLS_AES_192_CBC_SHA512		 0xff25
#define OP_PCL_DTLS_AES_256_CBC_SHA160		 0xff60
#define OP_PCL_DTLS_AES_256_CBC_SHA224		 0xff64
#define OP_PCL_DTLS_AES_256_CBC_SHA256		 0xff66
#define OP_PCL_DTLS_AES_256_CBC_SHA384		 0xff63
#define OP_PCL_DTLS_AES_256_CBC_SHA512		 0xff65
1063
/* 802.16 WiMAX protinfos */
#define OP_PCL_WIMAX_OFDM			 0x0201
#define OP_PCL_WIMAX_OFDMA			 0x0231

/* 802.11 WiFi protinfos */
#define OP_PCL_WIFI				 0xac04

/* MacSec protinfos */
#define OP_PCL_MACSEC				 0x0001

/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST			 0x0008
#define OP_PCL_PKPROT_DECRYPT			 0x0004
#define OP_PCL_PKPROT_ECC			 0x0002
#define OP_PCL_PKPROT_F2M			 0x0001
1079
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT	24
#define OP_ALG_TYPE_MASK	(0x7 << OP_ALG_TYPE_SHIFT)
/*
 * NOTE(review): unlike the other defines here, CLASS1/CLASS2 are raw
 * (unshifted) field values — callers apparently shift them themselves.
 */
#define OP_ALG_TYPE_CLASS1	2
#define OP_ALG_TYPE_CLASS2	4

/* Algorithm selector (8-bit field at bits 23:16) */
#define OP_ALG_ALGSEL_SHIFT	16
#define OP_ALG_ALGSEL_MASK	(0xff << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SUBMASK	(0x0f << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_AES	(0x10 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_DES	(0x20 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_3DES	(0x21 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_ARC4	(0x30 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_MD5	(0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA1	(0x41 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA224	(0x42 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA256	(0x43 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA384	(0x44 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA512	(0x45 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_RNG	(0x50 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SNOW	(0x60 << OP_ALG_ALGSEL_SHIFT)	/* alias of SNOW_F8 */
#define OP_ALG_ALGSEL_SNOW_F8	(0x60 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_KASUMI	(0x70 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_CRC	(0x90 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SNOW_F9	(0xA0 << OP_ALG_ALGSEL_SHIFT)
1105
1106#define OP_ALG_AAI_SHIFT 4
1107#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
1108
1109/* blockcipher AAI set */
1110#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
1111#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
1112#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
1113#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
1114#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
1115#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
1116#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
1117#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
1118#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
1119#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
1120#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
1121#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
1122#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
1123#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
1124#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
1125#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
1126#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
1127#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
1128#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
1129#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
1130#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
1131#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
1132#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
1133#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
1134#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
1135#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
1136#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
1137#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
1138#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
1139
1140/* randomizer AAI set */
1141#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
1142#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT)
1143#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT)
1144
1145/* hmac/smac AAI set */
1146#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
1147#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
1148#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
1149#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
1150
1151/* CRC AAI set*/
1152#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
1153#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
1154#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
1155#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
1156#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
1157#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
1158
1159/* Kasumi/SNOW AAI set */
1160#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
1161#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
1162#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
1163#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
1164
1165
1166#define OP_ALG_AS_SHIFT 2
1167#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
1168#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
1169#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
1170#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
1171#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
1172
1173#define OP_ALG_ICV_SHIFT 1
1174#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
1175#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT)
1176#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT)
1177
1178#define OP_ALG_DIR_SHIFT 0
1179#define OP_ALG_DIR_MASK 1
1180#define OP_ALG_DECRYPT 0
1181#define OP_ALG_ENCRYPT 1
1182
1183/* PKHA algorithm type set */
1184#define OP_ALG_PK 0x00800000
1185#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
1186
1187/* PKHA mode clear memory functions */
1188#define OP_ALG_PKMODE_A_RAM 0x80000
1189#define OP_ALG_PKMODE_B_RAM 0x40000
1190#define OP_ALG_PKMODE_E_RAM 0x20000
1191#define OP_ALG_PKMODE_N_RAM 0x10000
1192#define OP_ALG_PKMODE_CLEARMEM 0x00001
1193
1194/* PKHA mode modular-arithmetic functions */
1195#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
1196#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
1197#define OP_ALG_PKMODE_MOD_F2M 0x20000
1198#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
1199#define OP_ALG_PKMODE_PRJECTV 0x00800
1200#define OP_ALG_PKMODE_TIME_EQ 0x400
1201#define OP_ALG_PKMODE_OUT_B 0x000
1202#define OP_ALG_PKMODE_OUT_A 0x100
1203#define OP_ALG_PKMODE_MOD_ADD 0x002
1204#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
1205#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
1206#define OP_ALG_PKMODE_MOD_MULT 0x005
1207#define OP_ALG_PKMODE_MOD_EXPO 0x006
1208#define OP_ALG_PKMODE_MOD_REDUCT 0x007
1209#define OP_ALG_PKMODE_MOD_INV 0x008
1210#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
1211#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
1212#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
1213#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
1214#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
1215#define OP_ALG_PKMODE_MOD_GCD 0x00e
1216#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
1217
1218/* PKHA mode copy-memory functions */
1219#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
1220#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1221#define OP_ALG_PKMODE_DST_REG_SHIFT 10
1222#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
1223#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
1224#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1225#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
1226#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1227
1228#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1229#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1230#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1231#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
1232#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
1233#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
1234#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
1235#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1236#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1237#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1238#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1239#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1240#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1241#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1242#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1243#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
1244#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
1245
1246/*
1247 * SEQ_IN_PTR Command Constructs
1248 */
1249
1250/* Release Buffers */
1251#define SQIN_RBS 0x04000000
1252
1253/* Sequence pointer is really a descriptor */
1254#define SQIN_INL 0x02000000
1255
1256/* Sequence pointer is a scatter-gather table */
1257#define SQIN_SGF 0x01000000
1258
1259/* Appends to a previous pointer */
1260#define SQIN_PRE 0x00800000
1261
1262/* Use extended length following pointer */
1263#define SQIN_EXT 0x00400000
1264
1265/* Restore sequence with pointer/length */
1266#define SQIN_RTO 0x00200000
1267
1268/* Replace job descriptor */
1269#define SQIN_RJD 0x00100000
1270
1271#define SQIN_LEN_SHIFT 0
1272#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
1273
1274/*
1275 * SEQ_OUT_PTR Command Constructs
1276 */
1277
1278/* Sequence pointer is a scatter-gather table */
1279#define SQOUT_SGF 0x01000000
1280
1281/* Appends to a previous pointer */
1282#define SQOUT_PRE 0x00800000
1283
1284/* Restore sequence with pointer/length */
1285#define SQOUT_RTO 0x00200000
1286
1287/* Use extended length following pointer */
1288#define SQOUT_EXT 0x00400000
1289
1290#define SQOUT_LEN_SHIFT 0
1291#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
1292
1293
1294/*
1295 * SIGNATURE Command Constructs
1296 */
1297
1298/* TYPE field is all that's relevant */
1299#define SIGN_TYPE_SHIFT 16
1300#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
1301
1302#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
1303#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
1304#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
1305#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
1306#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
1307#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
1308
1309/*
1310 * MOVE Command Constructs
1311 */
1312
1313#define MOVE_AUX_SHIFT 25
1314#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
1315#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
1316#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
1317
1318#define MOVE_WAITCOMP_SHIFT 24
1319#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
1320#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
1321
1322#define MOVE_SRC_SHIFT 20
1323#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
1324#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
1325#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
1326#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
1327#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
1328#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
1329#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
1330#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
1331#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
1332#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
1333#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
1334
1335#define MOVE_DEST_SHIFT 16
1336#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
1337#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
1338#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
1339#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
1340#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
1341#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
1342#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
1343#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
1344#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
1345#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
1346#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
1347#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
1348#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
1349#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
1350
1351#define MOVE_OFFSET_SHIFT 8
1352#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
1353
1354#define MOVE_LEN_SHIFT 0
1355#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
1356
/* MOVE_LEN: select which MATH register supplies the move length */
#define MOVELEN_MRSEL_SHIFT	0
/* Fix: mask derived from MOVELEN_MRSEL_SHIFT, not MOVE_LEN_SHIFT
 * (both are 0 today, but the mask must track its own shift) */
#define MOVELEN_MRSEL_MASK	(0x3 << MOVELEN_MRSEL_SHIFT)
1359
1360/*
1361 * MATH Command Constructs
1362 */
1363
1364#define MATH_IFB_SHIFT 26
1365#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
1366#define MATH_IFB (1 << MATH_IFB_SHIFT)
1367
1368#define MATH_NFU_SHIFT 25
1369#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
1370#define MATH_NFU (1 << MATH_NFU_SHIFT)
1371
1372#define MATH_STL_SHIFT 24
1373#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
1374#define MATH_STL (1 << MATH_STL_SHIFT)
1375
1376/* Function selectors */
1377#define MATH_FUN_SHIFT 20
1378#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
1379#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
1380#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
1381#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
1382#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
1383#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
1384#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
1385#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
1386#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
1387#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
1388#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
1389#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
1390
1391/* Source 0 selectors */
1392#define MATH_SRC0_SHIFT 16
1393#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
1394#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
1395#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
1396#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
1397#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
1398#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
1399#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
1400#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
1401#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
1402#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
1403#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
1404
1405/* Source 1 selectors */
1406#define MATH_SRC1_SHIFT 12
1407#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
1408#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
1409#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
1410#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
1411#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
1412#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
1413#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
1414#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
1415#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
1416
1417/* Destination selectors */
1418#define MATH_DEST_SHIFT 8
1419#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
1420#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
1421#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
1422#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
1423#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
1424#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
1425#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
1426#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
1427#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
1428#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
1429
1430/* Length selectors */
1431#define MATH_LEN_SHIFT 0
1432#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
1433#define MATH_LEN_1BYTE 0x01
1434#define MATH_LEN_2BYTE 0x02
1435#define MATH_LEN_4BYTE 0x04
1436#define MATH_LEN_8BYTE 0x08
1437
1438/*
1439 * JUMP Command Constructs
1440 */
1441
1442#define JUMP_CLASS_SHIFT 25
1443#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
1444#define JUMP_CLASS_NONE 0
1445#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
1446#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
1447#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
1448
1449#define JUMP_JSL_SHIFT 24
1450#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
1451#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
1452
1453#define JUMP_TYPE_SHIFT 22
1454#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
1455#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
1456#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
1457#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
1458#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
1459
1460#define JUMP_TEST_SHIFT 16
1461#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
1462#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
1463#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
1464#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
1465#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
1466
1467/* Condition codes. JSL bit is factored in */
1468#define JUMP_COND_SHIFT 8
1469#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
1470#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
1471#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
1472#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
1473#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
1474#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
1475#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
1476#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
1477
1478#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
1479#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
1480#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
1481#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
1482#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
1483#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
1484#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
1485#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
1486
1487#define JUMP_OFFSET_SHIFT 0
1488#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
1489
1490/*
1491 * NFIFO ENTRY
1492 * Data Constructs
1493 *
1494 */
1495#define NFIFOENTRY_DEST_SHIFT 30
1496#define NFIFOENTRY_DEST_MASK (3 << NFIFOENTRY_DEST_SHIFT)
1497#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
1498#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
1499#define NFIFOENTRY_DEST_CLASS2 (2 << NFIFOENTRY_DEST_SHIFT)
1500#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT)
1501
1502#define NFIFOENTRY_LC2_SHIFT 29
1503#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
1504#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
1505
1506#define NFIFOENTRY_LC1_SHIFT 28
1507#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
1508#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
1509
1510#define NFIFOENTRY_FC2_SHIFT 27
1511#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
1512#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
1513
1514#define NFIFOENTRY_FC1_SHIFT 26
1515#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
1516#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
1517
1518#define NFIFOENTRY_STYPE_SHIFT 24
1519#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
1520#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
1521#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
1522#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
1523#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
1524
1525#define NFIFOENTRY_DTYPE_SHIFT 20
1526#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
1527
1528#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
1529#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
1530#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
1531#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
1532#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
1533#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
1534#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
1535
1536#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
1537#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
1538#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
1539#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
1540#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
1541#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
1542#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
1543#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
1544#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
1545#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
1546#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
1547#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
1548
1549
1550#define NFIFOENTRY_BND_SHIFT 19
1551#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
1552#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
1553
1554#define NFIFOENTRY_PTYPE_SHIFT 16
1555#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
1556
1557#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
1558#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
1559#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
1560#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
1561#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
1562#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
1563#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
1564#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
1565
1566#define NFIFOENTRY_OC_SHIFT 15
1567#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
1568#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
1569
/* NFIFO entry: alternate source type.
 * Fix: MASK/flag were built from NFIFOENTRY_OC_SHIFT (bit 15) by
 * copy-paste; they must use NFIFOENTRY_AST_SHIFT (bit 14). */
#define NFIFOENTRY_AST_SHIFT	14
#define NFIFOENTRY_AST_MASK	(1 << NFIFOENTRY_AST_SHIFT)
#define NFIFOENTRY_AST		(1 << NFIFOENTRY_AST_SHIFT)
1573
1574#define NFIFOENTRY_BM_SHIFT 11
1575#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
1576#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
1577
1578#define NFIFOENTRY_PS_SHIFT 10
1579#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
1580#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
1581
1582
1583#define NFIFOENTRY_DLEN_SHIFT 0
1584#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
1585
1586#define NFIFOENTRY_PLEN_SHIFT 0
1587#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
1588
1589/*
1590 * PDB internal definitions
1591 */
1592
1593/* IPSec ESP CBC Encap/Decap Options */
1594#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
1595#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
1596#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
1597#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
1598#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
1599#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
1600#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
1601#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
1602#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
1603#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
1604
1605#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
new file mode 100644
index 000000000000..c224f39e94a7
--- /dev/null
+++ b/drivers/crypto/caam/desc_constr.h
@@ -0,0 +1,204 @@
1/*
2 * caam descriptor construction helper functions
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 */
6
7#include "desc.h"
8
9#define IMMEDIATE (1 << 23)
10#define CAAM_CMD_SZ sizeof(u32)
11#define CAAM_PTR_SZ sizeof(dma_addr_t)
12
13#ifdef DEBUG
14#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
15 &__func__[sizeof("append")]); } while (0)
16#else
17#define PRINT_POS
18#endif
19
20#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
21 LDST_SRCDST_WORD_DECOCTRL | \
22 (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
23#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
24 LDST_SRCDST_WORD_DECOCTRL | \
25 (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
26
/* Return the descriptor's current length in 32-bit command words,
 * as encoded in the low bits of its header word. */
static inline int desc_len(u32 *desc)
{
	return *desc & HDR_DESCLEN_MASK;
}
31
/* Return the descriptor's current length in bytes. */
static inline int desc_bytes(void *desc)
{
	return desc_len(desc) * CAAM_CMD_SZ;
}
36
/* Return a pointer to the first unused command word of the descriptor
 * (i.e. where the next command/pointer/data will be appended). */
static inline u32 *desc_end(u32 *desc)
{
	return desc + desc_len(desc);
}
41
/* Return a pointer to the PDB (protocol data block) area of a shared
 * descriptor, which starts immediately after the header word. */
static inline void *sh_desc_pdb(u32 *desc)
{
	return desc + 1;
}
46
/* Write a descriptor header word: caller-supplied options, the
 * mandatory HDR_ONE bit, and an initial length of 1 (the header itself). */
static inline void init_desc(u32 *desc, u32 options)
{
	*desc = options | HDR_ONE | 1;
}
51
/* Initialize a shared-descriptor header with the given options. */
static inline void init_sh_desc(u32 *desc, u32 options)
{
	PRINT_POS;
	init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
57
/*
 * Initialize a shared-descriptor header followed by a PDB of pdb_bytes;
 * pdb_len counts the PDB in command words plus one for the header, and
 * is folded into both the start index and the length fields.
 * NOTE(review): pdb_bytes that is not a multiple of CAAM_CMD_SZ is
 * truncated by the integer division — confirm all callers pass
 * word-multiple PDB sizes.
 */
static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
{
	u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1;

	init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) |
		     options);
}
65
/* Initialize a job-descriptor header with the given options. */
static inline void init_job_desc(u32 *desc, u32 options)
{
	init_desc(desc, CMD_DESC_HDR | options);
}
70
71static inline void append_ptr(u32 *desc, dma_addr_t ptr)
72{
73 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
74
75 *offset = ptr;
76
77 (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
78}
79
80static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
81 u32 options)
82{
83 PRINT_POS;
84 init_job_desc(desc, HDR_SHARED | options |
85 (len << HDR_START_IDX_SHIFT));
86 append_ptr(desc, ptr);
87}
88
89static inline void append_data(u32 *desc, void *data, int len)
90{
91 u32 *offset = desc_end(desc);
92
93 if (len) /* avoid sparse warning: memcpy with byte count of 0 */
94 memcpy(offset, data, len);
95
96 (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
97}
98
99static inline void append_cmd(u32 *desc, u32 command)
100{
101 u32 *cmd = desc_end(desc);
102
103 *cmd = command;
104
105 (*desc)++;
106}
107
/* Append a command word carrying len, followed by a bus pointer. */
static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
				  u32 command)
{
	append_cmd(desc, command | len);
	append_ptr(desc, ptr);
}
114
/* Append a command word flagged IMMEDIATE with len, followed by the
 * inline data itself. */
static inline void append_cmd_data(u32 *desc, void *data, int len,
				   u32 command)
{
	append_cmd(desc, command | IMMEDIATE | len);
	append_data(desc, data, len);
}
121
122static inline u32 *append_jump(u32 *desc, u32 options)
123{
124 u32 *cmd = desc_end(desc);
125
126 PRINT_POS;
127 append_cmd(desc, CMD_JUMP | options);
128
129 return cmd;
130}
131
132static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
133{
134 *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
135}
136
/* Generate append_<cmd>() helpers that emit a bare command word. */
#define APPEND_CMD(cmd, op) \
static inline void append_##cmd(u32 *desc, u32 options) \
{ \
	PRINT_POS; \
	append_cmd(desc, CMD_##op | options); \
}
APPEND_CMD(operation, OPERATION)
APPEND_CMD(move, MOVE)
145
/* Generate append_<cmd>() helpers that emit a command word with a
 * length field but no pointer or data. */
#define APPEND_CMD_LEN(cmd, op) \
static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
{ \
	PRINT_POS; \
	append_cmd(desc, CMD_##op | len | options); \
}
APPEND_CMD_LEN(seq_store, SEQ_STORE)
APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
155
/* Generate append_<cmd>() helpers that emit a command word with a
 * length, followed by a bus pointer. */
#define APPEND_CMD_PTR(cmd, op) \
static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
				u32 options) \
{ \
	PRINT_POS; \
	append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
}
APPEND_CMD_PTR(key, KEY)
APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(store, STORE)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
170
171#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
172static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
173 unsigned int len, u32 options) \
174{ \
175 PRINT_POS; \
176 append_cmd_data(desc, data, len, CMD_##op | options); \
177}
178APPEND_CMD_PTR_TO_IMM(load, LOAD);
179APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
180
181/*
182 * 2nd variant for commands whose specified immediate length differs
183 * from length of immediate data provided, e.g., split keys
184 */
185#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
186static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
187 unsigned int data_len, \
188 unsigned int len, u32 options) \
189{ \
190 PRINT_POS; \
191 append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
192 append_data(desc, data, data_len); \
193}
194APPEND_CMD_PTR_TO_IMM2(key, KEY);
195
196#define APPEND_CMD_RAW_IMM(cmd, op, type) \
197static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
198 u32 options) \
199{ \
200 PRINT_POS; \
201 append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
202 append_cmd(desc, immediate); \
203}
204APPEND_CMD_RAW_IMM(load, LOAD, u32);
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
new file mode 100644
index 000000000000..bd57a6825f57
--- /dev/null
+++ b/drivers/crypto/caam/error.c
@@ -0,0 +1,248 @@
1/*
2 * CAAM Error Reporting
3 *
4 * Copyright 2009-2011 Freescale Semiconductor, Inc.
5 */
6
7#include "compat.h"
8#include "regs.h"
9#include "intern.h"
10#include "desc.h"
11#include "jr.h"
12#include "error.h"
13
/*
 * Format one value into a temporary buffer and append it to str;
 * max_alloc bounds the formatted parameter's worst-case length.
 * Fixes: (1) kmalloc(GFP_ATOMIC) can fail — skip the append rather
 * than dereference NULL in sprintf; (2) wrapped in do { } while (0)
 * so the macro behaves as a single statement in if/else bodies.
 */
#define SPRINTFCAT(str, format, param, max_alloc) \
do { \
	char *tmp; \
 \
	tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
	if (tmp) { \
		sprintf(tmp, format, param); \
		strcat(str, tmp); \
		kfree(tmp); \
	} \
} while (0)
23
24static void report_jump_idx(u32 status, char *outstr)
25{
26 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
27 JRSTA_DECOERR_INDEX_SHIFT;
28
29 if (status & JRSTA_DECOERR_JUMP)
30 strcat(outstr, "jump tgt desc idx ");
31 else
32 strcat(outstr, "desc idx ");
33
34 SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
35}
36
37static void report_ccb_status(u32 status, char *outstr)
38{
39 char *cha_id_list[] = {
40 "",
41 "AES",
42 "DES, 3DES",
43 "ARC4",
44 "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512",
45 "RNG",
46 "SNOW f8",
47 "Kasumi f8, f9",
48 "All Public Key Algorithms",
49 "CRC",
50 "SNOW f9",
51 };
52 char *err_id_list[] = {
53 "None. No error.",
54 "Mode error.",
55 "Data size error.",
56 "Key size error.",
57 "PKHA A memory size error.",
58 "PKHA B memory size error.",
59 "Data arrived out of sequence error.",
60 "PKHA divide-by-zero error.",
61 "PKHA modulus even error.",
62 "DES key parity error.",
63 "ICV check failed.",
64 "Hardware error.",
65 "Unsupported CCM AAD size.",
66 "Class 1 CHA is not reset",
67 "Invalid CHA combination was selected",
68 "Invalid CHA selected.",
69 };
70 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
71 JRSTA_CCBERR_CHAID_SHIFT;
72 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
73
74 report_jump_idx(status, outstr);
75
76 if (cha_id < sizeof(cha_id_list)) {
77 SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id],
78 strlen(cha_id_list[cha_id]));
79 } else {
80 SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
81 cha_id, sizeof("ff"));
82 }
83
84 if (err_id < sizeof(err_id_list)) {
85 SPRINTFCAT(outstr, "%s", err_id_list[err_id],
86 strlen(err_id_list[err_id]));
87 } else {
88 SPRINTFCAT(outstr, "unidentified err_id value 0x%02x",
89 err_id, sizeof("ff"));
90 }
91}
92
/* Placeholder: decoding of JUMP-halt status codes is not implemented;
 * just records that fact in outstr. */
static void report_jump_status(u32 status, char *outstr)
{
	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
}
97
98static void report_deco_status(u32 status, char *outstr)
99{
100 const struct {
101 u8 value;
102 char *error_text;
103 } desc_error_list[] = {
104 { 0x00, "None. No error." },
105 { 0x01, "SGT Length Error. The descriptor is trying to read "
106 "more data than is contained in the SGT table." },
107 { 0x02, "Reserved." },
108 { 0x03, "Job Ring Control Error. There is a bad value in the "
109 "Job Ring Control register." },
110 { 0x04, "Invalid Descriptor Command. The Descriptor Command "
111 "field is invalid." },
112 { 0x05, "Reserved." },
113 { 0x06, "Invalid KEY Command" },
114 { 0x07, "Invalid LOAD Command" },
115 { 0x08, "Invalid STORE Command" },
116 { 0x09, "Invalid OPERATION Command" },
117 { 0x0A, "Invalid FIFO LOAD Command" },
118 { 0x0B, "Invalid FIFO STORE Command" },
119 { 0x0C, "Invalid MOVE Command" },
120 { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
121 "invalid because the target is not a Job Header "
122 "Command, or the jump is from a Trusted Descriptor to "
123 "a Job Descriptor, or because the target Descriptor "
124 "contains a Shared Descriptor." },
125 { 0x0E, "Invalid MATH Command" },
126 { 0x0F, "Invalid SIGNATURE Command" },
127 { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
128 "Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
129 "LOAD, or SEQ FIFO STORE decremented the input or "
130 "output sequence length below 0. This error may result "
131 "if a built-in PROTOCOL Command has encountered a "
132 "malformed PDU." },
133 { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
134 { 0x12, "Shared Descriptor Header Error" },
135 { 0x13, "Header Error. Invalid length or parity, or certain "
136 "other problems." },
137 { 0x14, "Burster Error. Burster has gotten to an illegal "
138 "state" },
139 { 0x15, "Context Register Length Error. The descriptor is "
140 "trying to read or write past the end of the Context "
141 "Register. A SEQ LOAD or SEQ STORE with the VLF bit "
142 "set was executed with too large a length in the "
143 "variable length register (VSOL for SEQ STORE or VSIL "
144 "for SEQ LOAD)." },
145 { 0x16, "DMA Error" },
146 { 0x17, "Reserved." },
147 { 0x1A, "Job failed due to JR reset" },
148 { 0x1B, "Job failed due to Fail Mode" },
149 { 0x1C, "DECO Watchdog timer timeout error" },
150 { 0x1D, "DECO tried to copy a key from another DECO but the "
151 "other DECO's Key Registers were locked" },
152 { 0x1E, "DECO attempted to copy data from a DECO that had an "
153 "unmasked Descriptor error" },
154 { 0x1F, "LIODN error. DECO was trying to share from itself or "
155 "from another DECO but the two Non-SEQ LIODN values "
156 "didn't match or the 'shared from' DECO's Descriptor "
157 "required that the SEQ LIODNs be the same and they "
158 "aren't." },
159 { 0x20, "DECO has completed a reset initiated via the DRR "
160 "register" },
161 { 0x21, "Nonce error. When using EKT (CCM) key encryption "
162 "option in the FIFO STORE Command, the Nonce counter "
163 "reached its maximum value and this encryption mode "
164 "can no longer be used." },
165 { 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
166 "(input frame; block ciphers) and IPsec decap (output "
167 "frame, when doing the next header byte update) and "
168 "DCRC (output frame)." },
169 { 0x80, "DNR (do not run) error" },
170 { 0x81, "undefined protocol command" },
171 { 0x82, "invalid setting in PDB" },
172 { 0x83, "Anti-replay LATE error" },
173 { 0x84, "Anti-replay REPLAY error" },
174 { 0x85, "Sequence number overflow" },
175 { 0x86, "Sigver invalid signature" },
176 { 0x87, "DSA Sign Illegal test descriptor" },
177 { 0x88, "Protocol Format Error - A protocol has seen an error "
178 "in the format of data received. When running RSA, "
179 "this means that formatting with random padding was "
180 "used, and did not follow the form: 0x00, 0x02, 8-to-N "
181 "bytes of non-zero pad, 0x00, F data." },
182 { 0x89, "Protocol Size Error - A protocol has seen an error in "
183 "size. When running RSA, pdb size N < (size of F) when "
184 "no formatting is used; or pdb size N < (F + 11) when "
185 "formatting is used." },
186 { 0xC1, "Blob Command error: Undefined mode" },
187 { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
188 { 0xC4, "Blob Command error: Black Blob key or input size "
189 "error" },
190 { 0xC5, "Blob Command error: Invalid key destination" },
191 { 0xC8, "Blob Command error: Trusted/Secure mode error" },
192 { 0xF0, "IPsec TTL or hop limit field either came in as 0, "
193 "or was decremented to 0" },
194 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
195 };
196 u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
197 int i;
198
199 report_jump_idx(status, outstr);
200
201 for (i = 0; i < sizeof(desc_error_list); i++)
202 if (desc_error_list[i].value == desc_error)
203 break;
204
205 if (i != sizeof(desc_error_list) && desc_error_list[i].error_text) {
206 SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text,
207 strlen(desc_error_list[i].error_text));
208 } else {
209 SPRINTFCAT(outstr, "unidentified error value 0x%02x",
210 desc_error, sizeof("ff"));
211 }
212}
213
/*
 * Decode a Job-Ring-sourced status word. Decoding is not yet
 * implemented; emit a recognizable placeholder string instead.
 */
static void report_jr_status(u32 status, char *outstr)
{
	/* 'status' intentionally unused until decoding is implemented */
	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
}
218
/*
 * Decode a condition-code-sourced status word. Decoding is not yet
 * implemented; emit a recognizable placeholder string instead.
 */
static void report_cond_code_status(u32 status, char *outstr)
{
	/* 'status' intentionally unused until decoding is implemented */
	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
}
223
224char *caam_jr_strstatus(char *outstr, u32 status)
225{
226 struct stat_src {
227 void (*report_ssed)(u32 status, char *outstr);
228 char *error;
229 } status_src[] = {
230 { NULL, "No error" },
231 { NULL, NULL },
232 { report_ccb_status, "CCB" },
233 { report_jump_status, "Jump" },
234 { report_deco_status, "DECO" },
235 { NULL, NULL },
236 { report_jr_status, "Job Ring" },
237 { report_cond_code_status, "Condition Code" },
238 };
239 u32 ssrc = status >> JRSTA_SSRC_SHIFT;
240
241 sprintf(outstr, "%s: ", status_src[ssrc].error);
242
243 if (status_src[ssrc].report_ssed)
244 status_src[ssrc].report_ssed(status, outstr);
245
246 return outstr;
247}
248EXPORT_SYMBOL(caam_jr_strstatus);
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
new file mode 100644
index 000000000000..067afc120132
--- /dev/null
+++ b/drivers/crypto/caam/error.h
@@ -0,0 +1,10 @@
/*
 * CAAM Error Reporting code header
 *
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 */

#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
/*
 * Translate a raw CAAM job-ring status word into human-readable text
 * in the caller-supplied buffer 'outstr'; returns 'outstr'.
 * (Caller must size 'outstr' generously; no length is passed in.)
 */
extern char *caam_jr_strstatus(char *outstr, u32 status);
#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
new file mode 100644
index 000000000000..a34be01b0b29
--- /dev/null
+++ b/drivers/crypto/caam/intern.h
@@ -0,0 +1,113 @@
1/*
2 * CAAM/SEC 4.x driver backend
3 * Private/internal definitions between modules
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 *
7 */
8
9#ifndef INTERN_H
10#define INTERN_H
11
12#define JOBR_UNASSIGNED 0
13#define JOBR_ASSIGNED 1
14
15/* Currently comes from Kconfig param as a ^2 (driver-required) */
16#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
17
18/* Kconfig params for interrupt coalescing if selected (else zero) */
19#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
20#define JOBR_INTC JRCFG_ICEN
21#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
22#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
23#else
24#define JOBR_INTC 0
25#define JOBR_INTC_TIME_THLD 0
26#define JOBR_INTC_COUNT_THLD 0
27#endif
28
/*
 * Storage for tracking each in-process entry moving across a ring
 * Each entry on an output ring needs one of these
 */
struct caam_jrentry_info {
	/* Completion callback invoked from the dequeue tasklet */
	void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
	void *cbkarg;	/* Argument per ring entry */
	u32 *desc_addr_virt;	/* Stored virt addr for postprocessing */
	dma_addr_t desc_addr_dma;	/* Stored bus addr for done matching;
					 * 0 means "slot completed/free" */
	u32 desc_size;	/* Stored size for postprocessing, header derived */
};

/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
	struct device *parentdev;	/* points back to controller dev */
	int ridx;			/* ring index relative to detection */
	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
	struct tasklet_struct irqtask[NR_CPUS];	/* one dequeue tasklet/CPU */
	int irq;			/* One per queue */
	int assign;			/* busy/free (JOBR_{UN,}ASSIGNED) */

	/* Job ring info */
	int ringsize;	/* Size of rings (assume input = output) */
	struct caam_jrentry_info *entinfo;	/* Alloc'ed 1 per ring entry */
	spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
	int inp_ring_write_index;	/* Input index "tail" */
	int head;			/* entinfo (s/w ring) head index */
	dma_addr_t *inpring;	/* Base of input ring, alloc DMA-safe */
	spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
	int out_ring_read_index;	/* Output index "tail" */
	int tail;			/* entinfo (s/w ring) tail index */
	struct jr_outentry *outring;	/* Base of output ring, DMA-safe */
};

/*
 * Driver-private storage for a single CAAM block instance
 */
struct caam_drv_private {

	struct device *dev;
	struct device **jrdev; /* Alloc'ed array per sub-device */
	spinlock_t jr_alloc_lock;	/* protects 'assign' of each JobR */
	struct platform_device *pdev;

	/* Physical-presence section */
	struct caam_ctrl *ctrl; /* controller region */
	struct caam_deco **deco; /* DECO/CCB views */
	struct caam_assurance *ac;
	struct caam_queue_if *qi; /* QI control region */

	/*
	 * Detected geometry block. Filled in from device tree if powerpc,
	 * or from register-based version detection code
	 */
	u8 total_jobrs;		/* Total Job Rings in device */
	u8 qi_present;		/* Nonzero if QI present in device */
	int secvio_irq;		/* Security violation interrupt number */

	/* which jr allocated to scatterlist crypto */
	atomic_t tfm_count ____cacheline_aligned;
	int num_jrs_for_algapi;
	struct device **algapi_jr;
	/* list of registered crypto algorithms (mk generic context handle?) */
	struct list_head alg_list;

	/*
	 * debugfs entries for developer view into driver/device
	 * variables at runtime.
	 */
#ifdef CONFIG_DEBUG_FS
	struct dentry *dfs_root;
	struct dentry *ctl; /* controller dir */
	struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
	struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
	struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
	struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;

	struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
	struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
};

/* Register/unregister the JobR-backed crypto-API algorithms */
void caam_jr_algapi_init(struct device *dev);
void caam_jr_algapi_remove(struct device *dev);
#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
new file mode 100644
index 000000000000..68cb9af4d1a3
--- /dev/null
+++ b/drivers/crypto/caam/jr.c
@@ -0,0 +1,523 @@
1/*
2 * CAAM/SEC 4.x transport/backend driver
3 * JobR backend functionality
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#include "compat.h"
9#include "regs.h"
10#include "jr.h"
11#include "desc.h"
12#include "intern.h"
13
/*
 * Main per-ring interrupt handler. Acknowledges the hardware interrupt,
 * masks further ring interrupts, and defers response processing to a
 * per-CPU tasklet (which unmasks again when the output ring is drained).
 */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;	/* not ours (shared-IRQ safe) */

	/*
	 * If JobR error, we got more development work to do
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts (re-enabled at end of caam_jr_dequeue) */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	/* keep smp_processor_id() stable while picking this CPU's tasklet */
	preempt_disable();
	tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
	preempt_enable();

	return IRQ_HANDLED;
}
51
/*
 * Deferred service handler, run as interrupt-fired tasklet.
 * Walks the hardware output ring, matches each completed descriptor
 * back to its software bookkeeping slot (jobs may complete out of
 * order), runs the user callback with the lock dropped, and finally
 * unmasks the ring interrupt.
 */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;
	unsigned long flags;

	spin_lock_irqsave(&jrp->outlock, flags);

	/* snapshot s/w ring bounds; head advances concurrently in enqueue */
	head = ACCESS_ONCE(jrp->head);
	sw_idx = tail = jrp->tail;

	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       rd_reg32(&jrp->rregs->outring_used)) {

		hw_idx = jrp->out_ring_read_index;
		/* find the s/w slot whose DMA addr matches this h/w entry */
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			smp_read_barrier_depends();

			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		/* order the stashes above before index/ring updates below */
		smp_mb();

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail. Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
				smp_read_barrier_depends();
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* set done: tell h/w one output entry has been consumed */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		/* drop lock across the user callback; it may re-enqueue */
		spin_unlock_irqrestore(&jrp->outlock, flags);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);

		spin_lock_irqsave(&jrp->outlock, flags);

		/* re-read indices; they may have moved while unlocked */
		head = ACCESS_ONCE(jrp->head);
		sw_idx = tail = jrp->tail;
	}

	spin_unlock_irqrestore(&jrp->outlock, flags);

	/* reenable / unmask IRQs (masked in caam_jr_interrupt) */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}
137
138/**
139 * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
140 * an ordinal of the rings allocated, else returns -ENODEV if no rings
141 * are available.
142 * @ctrldev: points to the controller level dev (parent) that
143 * owns rings available for use.
144 * @dev: points to where a pointer to the newly allocated queue's
145 * dev can be written to if successful.
146 **/
147int caam_jr_register(struct device *ctrldev, struct device **rdev)
148{
149 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
150 struct caam_drv_private_jr *jrpriv = NULL;
151 unsigned long flags;
152 int ring;
153
154 /* Lock, if free ring - assign, unlock */
155 spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
156 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
157 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
158 if (jrpriv->assign == JOBR_UNASSIGNED) {
159 jrpriv->assign = JOBR_ASSIGNED;
160 *rdev = ctrlpriv->jrdev[ring];
161 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
162 return ring;
163 }
164 }
165
166 /* If assigned, write dev where caller needs it */
167 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
168 *rdev = NULL;
169
170 return -ENODEV;
171}
172EXPORT_SYMBOL(caam_jr_register);
173
174/**
175 * caam_jr_deregister() - Deregister an API and release the queue.
176 * Returns 0 if OK, -EBUSY if queue still contains pending entries
177 * or unprocessed results at the time of the call
178 * @dev - points to the dev that identifies the queue to
179 * be released.
180 **/
181int caam_jr_deregister(struct device *rdev)
182{
183 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
184 struct caam_drv_private *ctrlpriv;
185 unsigned long flags;
186
187 /* Get the owning controller's private space */
188 ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
189
190 /*
191 * Make sure ring empty before release
192 */
193 if (rd_reg32(&jrpriv->rregs->outring_used) ||
194 (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
195 return -EBUSY;
196
197 /* Release ring */
198 spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
199 jrpriv->assign = JOBR_UNASSIGNED;
200 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
201
202 return 0;
203}
204EXPORT_SYMBOL(caam_jr_deregister);
205
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that execute our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
 *        dev:    contains the job ring device that processed this
 *                response.
 *        desc:   descriptor that initiated the request, same as
 *                "desc" being argued to caam_jr_enqueue().
 *        status: untranslated status received from CAAM. See the
 *                reference manual for a detailed description of
 *                error meaning, or see the JRSTA definitions in the
 *                register header file
 *        arg:    the user argument registered below as @areq
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	unsigned long flags;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	/* descriptor length (in u32 words) lives in the job header word */
	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_irqsave(&jrp->inplock, flags);

	/* tail advances concurrently in the dequeue tasklet */
	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	/* full if either the h/w input ring or the s/w ring has no room */
	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_irqrestore(&jrp->inplock, flags);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	/* record bookkeeping needed to post-process this job on completion */
	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	/* publish entinfo/inpring writes before advancing the indices */
	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/* ensure the ring entry is visible before telling h/w about it */
	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_irqrestore(&jrp->inplock, flags);

	return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);
288
289static int caam_reset_hw_jr(struct device *dev)
290{
291 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
292 unsigned int timeout = 100000;
293
294 /*
295 * FIXME: disabling IRQs here inhibits proper job completion
296 * and error propagation
297 */
298 disable_irq(jrp->irq);
299
300 /* initiate flush (required prior to reset) */
301 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
302 while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
303 JRINT_ERR_HALT_INPROGRESS) && --timeout)
304 cpu_relax();
305
306 if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
307 JRINT_ERR_HALT_COMPLETE || timeout == 0) {
308 dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
309 return -EIO;
310 }
311
312 /* initiate reset */
313 timeout = 100000;
314 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
315 while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
316 cpu_relax();
317
318 if (timeout == 0) {
319 dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
320 return -EIO;
321 }
322
323 enable_irq(jrp->irq);
324
325 return 0;
326}
327
328/*
329 * Init JobR independent of platform property detection
330 */
331static int caam_jr_init(struct device *dev)
332{
333 struct caam_drv_private_jr *jrp;
334 dma_addr_t inpbusaddr, outbusaddr;
335 int i, error;
336
337 jrp = dev_get_drvdata(dev);
338
339 error = caam_reset_hw_jr(dev);
340 if (error)
341 return error;
342
343 jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
344 GFP_KERNEL | GFP_DMA);
345 jrp->outring = kzalloc(sizeof(struct jr_outentry) *
346 JOBR_DEPTH, GFP_KERNEL | GFP_DMA);
347
348 jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
349 GFP_KERNEL);
350
351 if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
352 (jrp->entinfo == NULL)) {
353 dev_err(dev, "can't allocate job rings for %d\n",
354 jrp->ridx);
355 return -ENOMEM;
356 }
357
358 for (i = 0; i < JOBR_DEPTH; i++)
359 jrp->entinfo[i].desc_addr_dma = !0;
360
361 /* Setup rings */
362 inpbusaddr = dma_map_single(dev, jrp->inpring,
363 sizeof(u32 *) * JOBR_DEPTH,
364 DMA_BIDIRECTIONAL);
365 if (dma_mapping_error(dev, inpbusaddr)) {
366 dev_err(dev, "caam_jr_init(): can't map input ring\n");
367 kfree(jrp->inpring);
368 kfree(jrp->outring);
369 kfree(jrp->entinfo);
370 return -EIO;
371 }
372
373 outbusaddr = dma_map_single(dev, jrp->outring,
374 sizeof(struct jr_outentry) * JOBR_DEPTH,
375 DMA_BIDIRECTIONAL);
376 if (dma_mapping_error(dev, outbusaddr)) {
377 dev_err(dev, "caam_jr_init(): can't map output ring\n");
378 dma_unmap_single(dev, inpbusaddr,
379 sizeof(u32 *) * JOBR_DEPTH,
380 DMA_BIDIRECTIONAL);
381 kfree(jrp->inpring);
382 kfree(jrp->outring);
383 kfree(jrp->entinfo);
384 return -EIO;
385 }
386
387 jrp->inp_ring_write_index = 0;
388 jrp->out_ring_read_index = 0;
389 jrp->head = 0;
390 jrp->tail = 0;
391
392 wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
393 wr_reg64(&jrp->rregs->outring_base, outbusaddr);
394 wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
395 wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
396
397 jrp->ringsize = JOBR_DEPTH;
398
399 spin_lock_init(&jrp->inplock);
400 spin_lock_init(&jrp->outlock);
401
402 /* Select interrupt coalescing parameters */
403 setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
404 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
405 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
406
407 /* Connect job ring interrupt handler. */
408 for_each_possible_cpu(i)
409 tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
410 (unsigned long)dev);
411
412 error = request_irq(jrp->irq, caam_jr_interrupt, 0,
413 "caam-jobr", dev);
414 if (error) {
415 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
416 jrp->ridx, jrp->irq);
417 irq_dispose_mapping(jrp->irq);
418 jrp->irq = 0;
419 dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
420 DMA_BIDIRECTIONAL);
421 dma_unmap_single(dev, outbusaddr, sizeof(u32 *) * JOBR_DEPTH,
422 DMA_BIDIRECTIONAL);
423 kfree(jrp->inpring);
424 kfree(jrp->outring);
425 kfree(jrp->entinfo);
426 return -EINVAL;
427 }
428
429 jrp->assign = JOBR_UNASSIGNED;
430 return 0;
431}
432
/*
 * Shutdown JobR independent of platform property code.
 * Resets the h/w ring, kills the per-CPU tasklets, releases the IRQ,
 * and unmaps/frees both rings and the bookkeeping array.
 * Returns the result of the hardware reset.
 */
int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	dma_addr_t inpbusaddr, outbusaddr;
	int ret, i;

	ret = caam_reset_hw_jr(dev);

	for_each_possible_cpu(i)
		tasklet_kill(&jrp->irqtask[i]);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

	/* Free rings; read the bus addresses back from the h/w registers */
	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_unmap_single(dev, outbusaddr,
			 sizeof(struct jr_outentry) * JOBR_DEPTH,
			 DMA_BIDIRECTIONAL);
	/*
	 * NOTE(review): inpring is allocated as dma_addr_t entries but
	 * unmapped as sizeof(u32 *) entries (matches the map size used in
	 * caam_jr_init); these differ if dma_addr_t is 64-bit on a 32-bit
	 * platform — confirm and unify.
	 */
	dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
			 DMA_BIDIRECTIONAL);
	kfree(jrp->outring);
	kfree(jrp->inpring);
	kfree(jrp->entinfo);

	return ret;
}
464
/*
 * Probe routine for each detected JobR subsystem. It assumes that
 * property detection was picked up externally.
 */
int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
		  int ring)
{
	struct device *ctrldev, *jrdev;
	struct platform_device *jr_pdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	u32 *jroffset;
	int error;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);

	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
			 GFP_KERNEL);
	if (jrpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
			ring);
		return -ENOMEM;
	}
	jrpriv->parentdev = ctrldev; /* point back to parent */
	jrpriv->ridx = ring; /* save ring identity relative to detection */

	/*
	 * Derive a pointer to the detected JobRs regs
	 * Driver has already iomapped the entire space, we just
	 * need to add in the offset to this JobR. Don't know if I
	 * like this long-term, but it'll run
	 */
	/* NOTE(review): of_get_property() can return NULL; *jroffset would
	 * then oops — confirm the "reg" property is guaranteed by binding */
	jroffset = (u32 *)of_get_property(np, "reg", NULL);
	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
							 + *jroffset);

	/* Build a local dev for each detected queue */
	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
	if (jr_pdev == NULL) {
		kfree(jrpriv);
		return -EINVAL;
	}
	jrdev = &jr_pdev->dev;
	dev_set_drvdata(jrdev, jrpriv);
	ctrlpriv->jrdev[ring] = jrdev;

	/* Identify the interrupt */
	jrpriv->irq = of_irq_to_resource(np, 0, NULL);

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		/*
		 * NOTE(review): this path frees jrpriv while jrdev's
		 * drvdata and ctrlpriv->jrdev[ring] still point at it,
		 * and jr_pdev is never unregistered — verify teardown
		 * ordering before relying on re-probe after failure.
		 */
		kfree(jrpriv);
		return error;
	}

	return error;
}
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
new file mode 100644
index 000000000000..c23df395b622
--- /dev/null
+++ b/drivers/crypto/caam/jr.h
@@ -0,0 +1,21 @@
/*
 * CAAM public-level include definitions for the JobR backend
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 */

#ifndef JR_H
#define JR_H

/* Prototypes for backend-level services exposed to APIs */
/* Claim a free ring from ctrldev; writes its dev to *rdev, returns ring
 * ordinal or -ENODEV if none free */
int caam_jr_register(struct device *ctrldev, struct device **rdev);
/* Release a ring; -EBUSY if jobs are pending or results unread */
int caam_jr_deregister(struct device *rdev);
/* Submit a DMA-able job descriptor; cbk runs at completion with the raw
 * CAAM status and the caller's areq */
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc, u32 status,
				void *areq),
		    void *areq);

/* Per-ring probe/teardown, driven by the controller's OF detection */
extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
			 int ring);
extern int caam_jr_shutdown(struct device *dev);
#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
new file mode 100644
index 000000000000..d063a260958b
--- /dev/null
+++ b/drivers/crypto/caam/regs.h
@@ -0,0 +1,663 @@
1/*
2 * CAAM hardware register-level view
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 */
6
7#ifndef REGS_H
8#define REGS_H
9
10#include <linux/types.h>
11#include <linux/io.h>
12
13/*
14 * Architecture-specific register access methods
15 *
16 * CAAM's bus-addressable registers are 64 bits internally.
17 * They have been wired to be safely accessible on 32-bit
18 * architectures, however. Registers were organized such
19 * that (a) they can be contained in 32 bits, (b) if not, then they
20 * can be treated as two 32-bit entities, or finally (c) if they
21 * must be treated as a single 64-bit value, then this can safely
22 * be done with two 32-bit cycles.
23 *
24 * For 32-bit operations on 64-bit values, CAAM follows the same
 25 * 64-bit register access conventions as its predecessors, in that
26 * writes are "triggered" by a write to the register at the numerically
27 * higher address, thus, a full 64-bit write cycle requires a write
28 * to the lower address, followed by a write to the higher address,
29 * which will latch/execute the write cycle.
30 *
31 * For example, let's assume a SW reset of CAAM through the master
32 * configuration register.
33 * - SWRST is in bit 31 of MCFG.
34 * - MCFG begins at base+0x0000.
35 * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
36 * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
37 *
38 * (and on Power, the convention is 0-31, 32-63, I know...)
39 *
40 * Assuming a 64-bit write to this MCFG to perform a software reset
41 * would then require a write of 0 to base+0x0000, followed by a
42 * write of 0x80000000 to base+0x0004, which would "execute" the
43 * reset.
44 *
45 * Of course, since MCFG 63-32 is all zero, we could cheat and simply
 46 * write 0x80000000 to base+0x0004, and the reset would work fine.
47 * However, since CAAM does contain some write-and-read-intended
48 * 64-bit registers, this code defines 64-bit access methods for
49 * the sake of internal consistency and simplicity, and so that a
50 * clean transition to 64-bit is possible when it becomes necessary.
51 *
52 * There are limitations to this that the developer must recognize.
53 * 32-bit architectures cannot enforce an atomic-64 operation,
54 * Therefore:
55 *
56 * - On writes, since the HW is assumed to latch the cycle on the
57 * write of the higher-numeric-address word, then ordered
58 * writes work OK.
59 *
60 * - For reads, where a register contains a relevant value of more
61 * that 32 bits, the hardware employs logic to latch the other
62 * "half" of the data until read, ensuring an accurate value.
63 * This is of particular relevance when dealing with CAAM's
64 * performance counters.
65 *
66 */
67
68#ifdef __BIG_ENDIAN
69#define wr_reg32(reg, data) out_be32(reg, data)
70#define rd_reg32(reg) in_be32(reg)
71#ifdef CONFIG_64BIT
72#define wr_reg64(reg, data) out_be64(reg, data)
73#define rd_reg64(reg) in_be64(reg)
74#endif
75#else
76#ifdef __LITTLE_ENDIAN
77#define wr_reg32(reg, data) __raw_writel(reg, data)
78#define rd_reg32(reg) __raw_readl(reg)
79#ifdef CONFIG_64BIT
80#define wr_reg64(reg, data) __raw_writeq(reg, data)
81#define rd_reg64(reg) __raw_readq(reg)
82#endif
83#endif
84#endif
85
86#ifndef CONFIG_64BIT
87static inline void wr_reg64(u64 __iomem *reg, u64 data)
88{
89 wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
90 wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull);
91}
92
93static inline u64 rd_reg64(u64 __iomem *reg)
94{
95 return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
96 ((u64)rd_reg32((u32 __iomem *)reg + 1));
97}
98#endif
99
/*
 * jr_outentry
 * Represents each entry in a JobR output ring
 *
 * Layout mirrors the hardware's output ring entry (completed-descriptor
 * pointer followed by its status word), hence __packed: no compiler
 * padding is allowed.  NOTE(review): dma_addr_t is 32 or 64 bits
 * depending on kernel config, so the entry size is config-dependent --
 * confirm this matches the ring programming in jr.c.
 */
struct jr_outentry {
	dma_addr_t desc;/* Pointer to completed descriptor */
	u32 jrstatus; /* Status for completed descriptor */
} __packed;
108
109/*
110 * caam_perfmon - Performance Monitor/Secure Memory Status/
111 * CAAM Global Status/Component Version IDs
112 *
113 * Spans f00-fff wherever instantiated
114 */
115
116/* Number of DECOs */
117#define CHA_NUM_DECONUM_SHIFT 56
118#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
119
120struct caam_perfmon {
121 /* Performance Monitor Registers f00-f9f */
122 u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
123 u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */
124 u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests */
125 u64 ob_enc_bytes; /* PC_OB_ENCRYPT - Outbound Bytes Encrypted */
126 u64 ob_prot_bytes; /* PC_OB_PROTECT - Outbound Bytes Protected */
127 u64 ib_dec_bytes; /* PC_IB_DECRYPT - Inbound Bytes Decrypted */
128 u64 ib_valid_bytes; /* PC_IB_VALIDATED Inbound Bytes Validated */
129 u64 rsvd[13];
130
131 /* CAAM Hardware Instantiation Parameters fa0-fbf */
132 u64 cha_rev; /* CRNR - CHA Revision Number */
133#define CTPR_QI_SHIFT 57
134#define CTPR_QI_MASK (0x1ull << CHA_NUM_DECONUM_SHIFT)
135 u64 comp_parms; /* CTPR - Compile Parameters Register */
136 u64 rsvd1[2];
137
138 /* CAAM Global Status fc0-fdf */
139 u64 faultaddr; /* FAR - Fault Address */
140 u32 faultliodn; /* FALR - Fault Address LIODN */
141 u32 faultdetail; /* FADR - Fault Addr Detail */
142 u32 rsvd2;
143 u32 status; /* CSTA - CAAM Status */
144 u64 rsvd3;
145
146 /* Component Instantiation Parameters fe0-fff */
147 u32 rtic_id; /* RVID - RTIC Version ID */
148 u32 ccb_id; /* CCBVID - CCB Version ID */
149 u64 cha_id; /* CHAVID - CHA Version ID */
150 u64 cha_num; /* CHANUM - CHA Number */
151 u64 caam_id; /* CAAMVID - CAAM Version ID */
152};
153
/* LIODN programming for DMA configuration */
#define MSTRID_LOCK_LIODN 0x80000000
#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */

#define MSTRID_LIODN_MASK 0x0fff
/*
 * Master ID register pair: liodn_ms carries the lock/make-trusted
 * control bits defined above, liodn_ls carries the LIODN value
 * (low 12 bits, per MSTRID_LIODN_MASK).
 */
struct masterid {
	u32 liodn_ms; /* lock and make-trusted control bits */
	u32 liodn_ls; /* LIODN for non-sequence and seq access */
};

/* Partition ID for DMA configuration (one register pair per DECO) */
struct partid {
	u32 rsvd1;
	u32 pidr; /* partition ID, DECO */
};
169
/* RNG test mode (replicated twice in some configurations) */
/* Padded out to 0x100 */
/* Register layout -- field order matches hardware; do not reorder. */
struct rngtst {
	u32 mode; /* RTSTMODEx - Test mode */
	u32 rsvd1[3];
	u32 reset; /* RTSTRESETx - Test reset control */
	u32 rsvd2[3];
	u32 status; /* RTSTSSTATUSx - Test status */
	u32 rsvd3;
	u32 errstat; /* RTSTERRSTATx - Test error status */
	u32 rsvd4;
	u32 errctl; /* RTSTERRCTLx - Test error control */
	u32 rsvd5;
	u32 entropy; /* RTSTENTROPYx - Test entropy */
	u32 rsvd6[15];
	u32 verifctl; /* RTSTVERIFCTLx - Test verification control */
	u32 rsvd7;
	u32 verifstat; /* RTSTVERIFSTATx - Test verification status */
	u32 rsvd8;
	u32 verifdata; /* RTSTVERIFDx - Test verification data */
	u32 rsvd9;
	u32 xkey; /* RTSTXKEYx - Test XKEY */
	u32 rsvd10;
	u32 oscctctl; /* RTSTOSCCTCTLx - Test osc. counter control */
	u32 rsvd11;
	u32 oscct; /* RTSTOSCCTx - Test oscillator counter */
	u32 rsvd12;
	u32 oscctstat; /* RTSTODCCTSTATx - Test osc counter status */
	u32 rsvd13[2];
	u32 ofifo[4]; /* RTSTOFIFOx - Test output FIFO */
	u32 rsvd14[15];
};
202
/*
 * caam_ctrl - basic core configuration
 * starts base + 0x0000 padded out to 0x1000
 */

/* Key register sizes, in 32-bit words (8 * 4 = 256 bits each) */
#define KEK_KEY_SIZE 8
#define TKEK_KEY_SIZE 8
#define TDSK_KEY_SIZE 8

#define DECO_RESET 1 /* Use with DECO reset/availability regs */
#define DECO_RESET_0 (DECO_RESET << 0)
#define DECO_RESET_1 (DECO_RESET << 1)
#define DECO_RESET_2 (DECO_RESET << 2)
#define DECO_RESET_3 (DECO_RESET << 3)
#define DECO_RESET_4 (DECO_RESET << 4)

/* Register layout -- field order matches hardware; do not reorder. */
struct caam_ctrl {
	/* Basic Configuration Section 000-01f */
	/* Read/Writable */
	u32 rsvd1;
	u32 mcr; /* MCFG Master Config Register */
	u32 rsvd2[2];

	/* Bus Access Configuration Section 010-11f */
	/* Read/Writable */
	struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
	u32 rsvd3[12];
	struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
	u32 rsvd4[7];
	u32 deco_rq; /* DECORR - DECO Request */
	struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
	u32 rsvd5[22];

	/* DECO Availability/Reset Section 120-3ff */
	u32 deco_avail; /* DAR - DECO availability */
	u32 deco_reset; /* DRR - DECO reset */
	u32 rsvd6[182];

	/* Key Encryption/Decryption Configuration 400-5ff */
	/* Read/Writable only while in Non-secure mode */
	u32 kek[KEK_KEY_SIZE]; /* JDKEKR - Key Encryption Key */
	u32 tkek[TKEK_KEY_SIZE]; /* TDKEKR - Trusted Desc KEK */
	u32 tdsk[TDSK_KEY_SIZE]; /* TDSKR - Trusted Desc Signing Key */
	u32 rsvd7[32];
	u64 sknonce; /* SKNR - Secure Key Nonce */
	u32 rsvd8[70];

	/* RNG Test/Verification/Debug Access 600-7ff */
	/* (Useful in Test/Debug modes only...) */
	struct rngtst rtst[2];

	u32 rsvd9[448];

	/* Performance Monitor f00-fff */
	struct caam_perfmon perfmon;
};
259
/*
 * Controller master config register (MCFG) defs
 */
#define MCFGR_SWRESET 0x80000000 /* software reset */
#define MCFGR_WDENABLE 0x40000000 /* DECO watchdog enable */
#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */
#define MCFGR_DMA_RESET 0x10000000
#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */

/* AXI read cache control (4-bit field) */
#define MCFGR_ARCACHE_SHIFT 12
#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)

/* AXI write cache control (4-bit field) */
#define MCFGR_AWCACHE_SHIFT 8
#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)

/* AXI pipeline depth (4-bit field) */
#define MCFGR_AXIPIPE_SHIFT 4
#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)

#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
#define MCFGR_BURST_64 0x00000001 /* Max burst size */
283
/*
 * caam_job_ring - direct job ring setup
 * 1-4 possible per instantiation, base + 1000/2000/3000/4000
 * Padded out to 0x1000
 *
 * Register layout -- field order matches hardware; do not reorder.
 */
struct caam_job_ring {
	/* Input ring */
	u64 inpring_base; /* IRBAx - Input desc ring baseaddr */
	u32 rsvd1;
	u32 inpring_size; /* IRSx - Input ring size */
	u32 rsvd2;
	u32 inpring_avail; /* IRSAx - Input ring room remaining */
	u32 rsvd3;
	u32 inpring_jobadd; /* IRJAx - Input ring jobs added */

	/* Output Ring */
	u64 outring_base; /* ORBAx - Output status ring base addr */
	u32 rsvd4;
	u32 outring_size; /* ORSx - Output ring size */
	u32 rsvd5;
	u32 outring_rmvd; /* ORJRx - Output ring jobs removed */
	u32 rsvd6;
	u32 outring_used; /* ORSFx - Output ring slots full */

	/* Status/Configuration */
	u32 rsvd7;
	u32 jroutstatus; /* JRSTAx - JobR output status */
	u32 rsvd8;
	u32 jrintstatus; /* JRINTx - JobR interrupt status */
	u32 rconfig_hi; /* JRxCFG - Ring configuration */
	u32 rconfig_lo;

	/* Indices. CAAM maintains as "heads" of each queue */
	u32 rsvd9;
	u32 inp_rdidx; /* IRRIx - Input ring read index */
	u32 rsvd10;
	u32 out_wtidx; /* ORWIx - Output ring write index */

	/* Command/control */
	u32 rsvd11;
	u32 jrcommand; /* JRCRx - JobR command */

	u32 rsvd12[932];

	/* Performance Monitor f00-fff */
	struct caam_perfmon perfmon;
};
331
#define JR_RINGSIZE_MASK 0x03ff
/*
 * jrstatus - Job Ring Output Status
 * All values in lo word
 * Also note, same values written out as status through QI
 * in the command/status field of a frame descriptor
 */
/* Status source: top nibble selects which decode below applies */
#define JRSTA_SSRC_SHIFT 28
#define JRSTA_SSRC_MASK 0xf0000000

#define JRSTA_SSRC_NONE 0x00000000
#define JRSTA_SSRC_CCB_ERROR 0x20000000
#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
#define JRSTA_SSRC_DECO 0x40000000
#define JRSTA_SSRC_JRERROR 0x60000000
#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000

/* DECO-sourced status (SSRC == JRSTA_SSRC_DECO) */
#define JRSTA_DECOERR_JUMP 0x08000000
#define JRSTA_DECOERR_INDEX_SHIFT 8
#define JRSTA_DECOERR_INDEX_MASK 0xff00
#define JRSTA_DECOERR_ERROR_MASK 0x00ff

/* DECO error codes (JRSTA_DECOERR_ERROR_MASK field) */
#define JRSTA_DECOERR_NONE 0x00
#define JRSTA_DECOERR_LINKLEN 0x01
#define JRSTA_DECOERR_LINKPTR 0x02
#define JRSTA_DECOERR_JRCTRL 0x03
#define JRSTA_DECOERR_DESCCMD 0x04
#define JRSTA_DECOERR_ORDER 0x05
#define JRSTA_DECOERR_KEYCMD 0x06
#define JRSTA_DECOERR_LOADCMD 0x07
#define JRSTA_DECOERR_STORECMD 0x08
#define JRSTA_DECOERR_OPCMD 0x09
#define JRSTA_DECOERR_FIFOLDCMD 0x0a
#define JRSTA_DECOERR_FIFOSTCMD 0x0b
#define JRSTA_DECOERR_MOVECMD 0x0c
#define JRSTA_DECOERR_JUMPCMD 0x0d
#define JRSTA_DECOERR_MATHCMD 0x0e
#define JRSTA_DECOERR_SHASHCMD 0x0f
#define JRSTA_DECOERR_SEQCMD 0x10
#define JRSTA_DECOERR_DECOINTERNAL 0x11
#define JRSTA_DECOERR_SHDESCHDR 0x12
#define JRSTA_DECOERR_HDRLEN 0x13
#define JRSTA_DECOERR_BURSTER 0x14
#define JRSTA_DECOERR_DESCSIGNATURE 0x15
#define JRSTA_DECOERR_DMA 0x16
#define JRSTA_DECOERR_BURSTFIFO 0x17
#define JRSTA_DECOERR_JRRESET 0x1a
#define JRSTA_DECOERR_JOBFAIL 0x1b
#define JRSTA_DECOERR_DNRERR 0x80
#define JRSTA_DECOERR_UNDEFPCL 0x81
#define JRSTA_DECOERR_PDBERR 0x82
#define JRSTA_DECOERR_ANRPLY_LATE 0x83
#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
#define JRSTA_DECOERR_SEQOVF 0x85
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87

/* CCB-sourced status (SSRC == JRSTA_SSRC_CCB_ERROR) */
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
#define JRSTA_CCBERR_CHAID_MASK 0x00f0
#define JRSTA_CCBERR_CHAID_SHIFT 4
#define JRSTA_CCBERR_ERRID_MASK 0x000f

/* Which CHA (crypto hardware accelerator) raised the CCB error */
#define JRSTA_CCBERR_CHAID_AES (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_DES (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_ARC4 (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_MD (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_SNOW (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_KASUMI (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_PK (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_CRC (0x09 << JRSTA_CCBERR_CHAID_SHIFT)

#define JRSTA_CCBERR_ERRID_NONE 0x00
#define JRSTA_CCBERR_ERRID_MODE 0x01
#define JRSTA_CCBERR_ERRID_DATASIZ 0x02
#define JRSTA_CCBERR_ERRID_KEYSIZ 0x03
#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
#define JRSTA_CCBERR_ERRID_ICVCHK 0x0a
#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
#define JRSTA_CCBERR_ERRID_CCMAAD 0x0c
#define JRSTA_CCBERR_ERRID_INVCHA 0x0f

/* JRINTx - JobR interrupt status fields */
#define JRINT_ERR_INDEX_MASK 0x3fff0000
#define JRINT_ERR_INDEX_SHIFT 16
#define JRINT_ERR_TYPE_MASK 0xf00
#define JRINT_ERR_TYPE_SHIFT 8
#define JRINT_ERR_HALT_MASK 0xc
#define JRINT_ERR_HALT_SHIFT 2
#define JRINT_ERR_HALT_INPROGRESS 0x4
#define JRINT_ERR_HALT_COMPLETE 0x8
#define JRINT_JR_ERROR 0x02
#define JRINT_JR_INT 0x01

#define JRINT_ERR_TYPE_WRITE 1
#define JRINT_ERR_TYPE_BAD_INPADDR 3
#define JRINT_ERR_TYPE_BAD_OUTADDR 4
#define JRINT_ERR_TYPE_INV_INPWRT 5
#define JRINT_ERR_TYPE_INV_OUTWRT 6
#define JRINT_ERR_TYPE_RESET 7
#define JRINT_ERR_TYPE_REMOVE_OFL 8
#define JRINT_ERR_TYPE_ADD_OFL 9

/* JRxCFG - ring configuration fields */
#define JRCFG_SOE 0x04
#define JRCFG_ICEN 0x02
#define JRCFG_IMSK 0x01
#define JRCFG_ICDCT_SHIFT 8
#define JRCFG_ICTT_SHIFT 16

/* JRCRx - JobR command */
#define JRCR_RESET 0x01
448
/*
 * caam_assurance - Assurance Controller View
 * base + 0x6000 padded out to 0x1000
 */

/* One address/length pair describing a memory region to integrity-check */
struct rtic_element {
	u64 address;
	u32 rsvd;
	u32 length;
};

struct rtic_block {
	struct rtic_element element[2];
};

/* Hash result, exposed in both byte orders */
struct rtic_memhash {
	u32 memhash_be[32];
	u32 memhash_le[32];
};

/* Register layout -- field order matches hardware; do not reorder. */
struct caam_assurance {
	/* Status/Command/Watchdog */
	u32 rsvd1;
	u32 status; /* RSTA - Status */
	u32 rsvd2;
	u32 cmd; /* RCMD - Command */
	u32 rsvd3;
	u32 ctrl; /* RCTL - Control */
	u32 rsvd4;
	u32 throttle; /* RTHR - Throttle */
	u32 rsvd5[2];
	u64 watchdog; /* RWDOG - Watchdog Timer */
	u32 rsvd6;
	u32 rend; /* REND - Endian corrections */
	u32 rsvd7[50];

	/* Block access/configuration @ 100/110/120/130 */
	struct rtic_block memblk[4]; /* Memory Blocks A-D */
	u32 rsvd8[32];

	/* Block hashes @ 200/300/400/500 */
	struct rtic_memhash hash[4]; /* Block hash values A-D */
	u32 rsvd_3[640]; /* NOTE(review): name breaks the rsvdN
			  * sequence (would be rsvd9); kept as-is
			  * since renaming could break references */
};
493
/*
 * caam_queue_if - QI configuration and control
 * starts base + 0x7000, padded out to 0x1000 long
 *
 * Register layout -- field order matches hardware; do not reorder.
 */

struct caam_queue_if {
	u32 qi_control_hi; /* QICTL - QI Control */
	u32 qi_control_lo;
	u32 rsvd1;
	u32 qi_status; /* QISTA - QI Status */
	u32 qi_deq_cfg_hi; /* QIDQC - QI Dequeue Configuration */
	u32 qi_deq_cfg_lo;
	u32 qi_enq_cfg_hi; /* QISEQC - QI Enqueue Command */
	u32 qi_enq_cfg_lo;
	u32 rsvd2[1016];
};

/* QI control bits - low word */
#define QICTL_DQEN 0x01 /* Enable frame pop */
#define QICTL_STOP 0x02 /* Stop dequeue/enqueue */
#define QICTL_SOE 0x04 /* Stop on error */

/* QI control bits - high word (byte/half/word/dword swap controls) */
#define QICTL_MBSI 0x01
#define QICTL_MHWSI 0x02
#define QICTL_MWSI 0x04
#define QICTL_MDWSI 0x08
#define QICTL_CBSI 0x10 /* CtrlDataByteSwapInput */
#define QICTL_CHWSI 0x20 /* CtrlDataHalfSwapInput */
#define QICTL_CWSI 0x40 /* CtrlDataWordSwapInput */
#define QICTL_CDWSI 0x80 /* CtrlDataDWordSwapInput */
#define QICTL_MBSO 0x0100
#define QICTL_MHWSO 0x0200
#define QICTL_MWSO 0x0400
#define QICTL_MDWSO 0x0800
#define QICTL_CBSO 0x1000 /* CtrlDataByteSwapOutput */
#define QICTL_CHWSO 0x2000 /* CtrlDataHalfSwapOutput */
#define QICTL_CWSO 0x4000 /* CtrlDataWordSwapOutput */
#define QICTL_CDWSO 0x8000 /* CtrlDataDWordSwapOutput */
#define QICTL_DMBS 0x010000
#define QICTL_EPO 0x020000

/* QI status bits */
#define QISTA_PHRDERR 0x01 /* PreHeader Read Error */
#define QISTA_CFRDERR 0x02 /* Compound Frame Read Error */
#define QISTA_OFWRERR 0x04 /* Output Frame Write Error
			    * (NOTE(review): original comment said
			    * "Read"; name suggests write -- confirm) */
#define QISTA_BPDERR 0x08 /* Buffer Pool Depleted */
#define QISTA_BTSERR 0x10 /* Buffer Undersize */
#define QISTA_CFWRERR 0x20 /* Compound Frame Write Err */
#define QISTA_STOPD 0x80000000 /* QI Stopped (see QICTL) */
544
/* deco_sg_table - DECO view of scatter/gather table */
struct deco_sg_table {
	u64 addr; /* Segment Address */
	u32 elen; /* E, F bits + 30-bit length */
	u32 bpid_offset; /* Buffer Pool ID + 16-bit offset
			  * (NOTE(review): original comment said
			  * "length"; field name suggests offset --
			  * confirm against HW reference manual) */
};
551
/*
 * caam_deco - descriptor controller - CHA cluster block
 *
 * Only accessible when direct DECO access is turned on
 * (done in DECORR, via MID programmed in DECOxMID)
 *
 * 5 typical, base + 0x8000/9000/a000/b000/c000
 * Padded out to 0x1000 long
 *
 * Register layout -- field order matches hardware; do not reorder.
 */
struct caam_deco {
	u32 rsvd1;
	u32 cls1_mode; /* CxC1MR - Class 1 Mode */
	u32 rsvd2;
	u32 cls1_keysize; /* CxC1KSR - Class 1 Key Size */
	u32 cls1_datasize_hi; /* CxC1DSR - Class 1 Data Size */
	u32 cls1_datasize_lo;
	u32 rsvd3;
	u32 cls1_icvsize; /* CxC1ICVSR - Class 1 ICV size */
	u32 rsvd4[5];
	u32 cha_ctrl; /* CCTLR - CHA control */
	u32 rsvd5;
	u32 irq_crtl; /* CxCIRQ - CCB interrupt done/error/clear
		       * (NOTE(review): "crtl" looks like a typo for
		       * "ctrl"; kept -- renaming breaks references) */
	u32 rsvd6;
	u32 clr_written; /* CxCWR - Clear-Written */
	u32 ccb_status_hi; /* CxCSTA - CCB Status/Error */
	u32 ccb_status_lo;
	u32 rsvd7[3];
	u32 aad_size; /* CxAADSZR - Current AAD Size */
	u32 rsvd8;
	u32 cls1_iv_size; /* CxC1IVSZR - Current Class 1 IV Size */
	u32 rsvd9[7];
	u32 pkha_a_size; /* PKASZRx - Size of PKHA A */
	u32 rsvd10;
	u32 pkha_b_size; /* PKBSZRx - Size of PKHA B */
	u32 rsvd11;
	u32 pkha_n_size; /* PKNSZRx - Size of PKHA N */
	u32 rsvd12;
	u32 pkha_e_size; /* PKESZRx - Size of PKHA E */
	u32 rsvd13[24];
	u32 cls1_ctx[16]; /* CxC1CTXR - Class 1 Context @100 */
	u32 rsvd14[48];
	u32 cls1_key[8]; /* CxC1KEYR - Class 1 Key @200 */
	u32 rsvd15[121];
	u32 cls2_mode; /* CxC2MR - Class 2 Mode */
	u32 rsvd16;
	u32 cls2_keysize; /* CxX2KSR - Class 2 Key Size */
	u32 cls2_datasize_hi; /* CxC2DSR - Class 2 Data Size */
	u32 cls2_datasize_lo;
	u32 rsvd17;
	u32 cls2_icvsize; /* CxC2ICVSZR - Class 2 ICV Size */
	u32 rsvd18[56];
	u32 cls2_ctx[18]; /* CxC2CTXR - Class 2 Context @500 */
	u32 rsvd19[46];
	u32 cls2_key[32]; /* CxC2KEYR - Class2 Key @600 */
	u32 rsvd20[84];
	u32 inp_infofifo_hi; /* CxIFIFO - Input Info FIFO @7d0 */
	u32 inp_infofifo_lo;
	u32 rsvd21[2];
	u64 inp_datafifo; /* CxDFIFO - Input Data FIFO */
	u32 rsvd22[2];
	u64 out_datafifo; /* CxOFIFO - Output Data FIFO */
	u32 rsvd23[2];
	u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
	u32 jr_ctl_lo;
	u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
	u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
	u32 op_status_lo;
	u32 rsvd24[2];
	u32 liodn; /* DxLSR - DECO LIODN Status - non-seq */
	u32 td_liodn; /* DxLSR - DECO LIODN Status - trustdesc */
	u32 rsvd26[6];
	u64 math[4]; /* DxMTH - Math register */
	u32 rsvd27[8];
	struct deco_sg_table gthr_tbl[4]; /* DxGTR - Gather Tables */
	u32 rsvd28[16];
	struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */
	u32 rsvd29[48];
	u32 descbuf[64]; /* DxDESB - Descriptor buffer */
	u32 rsvd30[320];
};
632
/*
 * Current top-level view of memory map is:
 *
 * 0x0000 - 0x0fff - CAAM Top-Level Control
 * 0x1000 - 0x1fff - Job Ring 0
 * 0x2000 - 0x2fff - Job Ring 1
 * 0x3000 - 0x3fff - Job Ring 2
 * 0x4000 - 0x4fff - Job Ring 3
 * 0x5000 - 0x5fff - (unused)
 * 0x6000 - 0x6fff - Assurance Controller
 * 0x7000 - 0x7fff - Queue Interface
 * 0x8000 - 0x8fff - DECO-CCB 0
 * 0x9000 - 0x9fff - DECO-CCB 1
 * 0xa000 - 0xafff - DECO-CCB 2
 * 0xb000 - 0xbfff - DECO-CCB 3
 * 0xc000 - 0xcfff - DECO-CCB 4
 *
 * caam_full describes the full register view of CAAM if useful,
 * although many configurations may choose to implement parts of
 * the register map separately, in differing privilege regions
 */
struct caam_full {
	struct caam_ctrl __iomem ctrl; /* NOTE(review): __iomem on an
					* embedded (non-pointer) member
					* is unusual -- confirm sparse
					* annotation intent */
	struct caam_job_ring jr[4];
	u64 rsvd[512]; /* 512 * 8 = 0x1000: the unused 0x5000 page */
	struct caam_assurance assure;
	struct caam_queue_if qi;
	struct caam_deco *deco; /* NOTE(review): a pointer member does
				 * not occupy the five DECO-CCB pages
				 * the map above describes -- confirm
				 * whether an in-line array was meant */
};
662
663#endif /* REGS_H */