-rw-r--r--  Documentation/devicetree/bindings/crypto/fsl-sec4.txt   397
-rw-r--r--  arch/powerpc/boot/dts/p4080ds.dts                        86
-rw-r--r--  arch/s390/crypto/Makefile                                 1
-rw-r--r--  arch/s390/crypto/aes_s390.c                             383
-rw-r--r--  arch/s390/crypto/crypt_s390.h                           112
-rw-r--r--  arch/s390/crypto/des_check_key.c                        132
-rw-r--r--  arch/s390/crypto/des_s390.c                             370
-rw-r--r--  arch/s390/crypto/ghash_s390.c                           162
-rw-r--r--  arch/s390/crypto/prng.c                                   2
-rw-r--r--  arch/s390/crypto/sha1_s390.c                              2
-rw-r--r--  arch/s390/crypto/sha256_s390.c                            2
-rw-r--r--  arch/s390/crypto/sha512_s390.c                            2
-rw-r--r--  arch/x86/crypto/Makefile                                  4
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c                        9
-rw-r--r--  arch/x86/crypto/fpu.c                                    10
-rw-r--r--  crypto/Kconfig                                            6
-rw-r--r--  crypto/tcrypt.c                                           4
-rw-r--r--  crypto/testmgr.c                                         16
-rw-r--r--  crypto/testmgr.h                                         60
-rw-r--r--  drivers/char/hw_random/Kconfig                            2
-rw-r--r--  drivers/char/hw_random/amd-rng.c                          9
-rw-r--r--  drivers/crypto/Kconfig                                   65
-rw-r--r--  drivers/crypto/Makefile                                   2
-rw-r--r--  drivers/crypto/caam/Kconfig                              72
-rw-r--r--  drivers/crypto/caam/Makefile                              8
-rw-r--r--  drivers/crypto/caam/caamalg.c                          1268
-rw-r--r--  drivers/crypto/caam/compat.h                             35
-rw-r--r--  drivers/crypto/caam/ctrl.c                              269
-rw-r--r--  drivers/crypto/caam/desc.h                             1605
-rw-r--r--  drivers/crypto/caam/desc_constr.h                       205
-rw-r--r--  drivers/crypto/caam/error.c                             248
-rw-r--r--  drivers/crypto/caam/error.h                              11
-rw-r--r--  drivers/crypto/caam/intern.h                            113
-rw-r--r--  drivers/crypto/caam/jr.c                                517
-rw-r--r--  drivers/crypto/caam/jr.h                                 21
-rw-r--r--  drivers/crypto/caam/regs.h                              663
-rw-r--r--  drivers/crypto/mv_cesa.c                                 97
-rw-r--r--  drivers/crypto/omap-sham.c                               78
-rw-r--r--  drivers/crypto/padlock-sha.c                            269
-rw-r--r--  drivers/crypto/picoxcell_crypto.c                        64
-rw-r--r--  drivers/crypto/s5p-sss.c                                701
41 files changed, 7660 insertions(+), 422 deletions(-)
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
new file mode 100644
index 000000000000..bf57ecd5d73a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -0,0 +1,397 @@
+=====================================================================
+SEC 4 Device Tree Binding
+Copyright (C) 2008-2011 Freescale Semiconductor Inc.
+
+ CONTENTS
+ -Overview
+ -SEC 4 Node
+ -Job Ring Node
+ -Run Time Integrity Check (RTIC) Node
+ -Run Time Integrity Check (RTIC) Memory Node
+ -Secure Non-Volatile Storage (SNVS) Node
+ -Full Example
+
+NOTE: the SEC 4 is also known as Freescale's Cryptographic Accelerator
+and Assurance Module (CAAM).
+
+=====================================================================
+Overview
+
+DESCRIPTION
+
+SEC 4 h/w can process requests from 2 types of sources.
+1. DPAA Queue Interface (HW interface between Queue Manager & SEC 4).
+2. Job Rings (HW interface between cores & SEC 4 registers).
+
+High Speed Data Path Configuration:
+
+HW interface between QM & SEC 4 and also BM & SEC 4, on DPAA-enabled parts
+such as the P4080. The number of simultaneous dequeues the QI can make is
+equal to the number of Descriptor Controller (DECO) engines in a particular
+SEC version. E.g., the SEC 4.0 in the P4080 has 5 DECOs and can thus
+dequeue from 5 subportals simultaneously.
+
+Job Ring Data Path Configuration:
+
+Each JR is located on a separate 4k page; it may (or may not) be made visible
+in the memory partition devoted to a particular core. The P4080 has 4 JRs, so
+up to 4 JRs can be configured, and all 4 JRs process requests in parallel.
+
+=====================================================================
+SEC 4 Node
+
+Description
+
+    Node defines the base address of the SEC 4 block.
+    This block specifies the address range of all global
+    configuration registers for the SEC 4 block.  It
+    also receives interrupts from the Run Time Integrity Check
+    (RTIC) function within the SEC 4 block.
+
+PROPERTIES
+
+   - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0"
+
+   - #address-cells
+      Usage: required
+      Value type: <u32>
+      Definition: A standard property.  Defines the number of cells
+          for representing physical addresses in child nodes.
+
+   - #size-cells
+      Usage: required
+      Value type: <u32>
+      Definition: A standard property.  Defines the number of cells
+          for representing the size of physical addresses in
+          child nodes.
+
+   - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies the physical
+          address and length of the SEC4 configuration
+          registers.
+
+   - ranges
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies the physical address
+          range of the SEC 4.0 register space (SNVS not included).  A
+          triplet that includes the child address, parent address, &
+          length.
+
+   - interrupts
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: Specifies the interrupts generated by this
+          device.  The value of the interrupts property
+          consists of one interrupt specifier.  The format
+          of the specifier is defined by the binding document
+          describing the node's interrupt parent.
+
+   - interrupt-parent
+      Usage: (required if interrupt property is defined)
+      Value type: <phandle>
+      Definition: A single <phandle> value that points
+          to the interrupt parent to which the child domain
+          is being mapped.
+
+   Note: All other standard properties (see the ePAPR) are allowed
+   but are optional.
+
+
+EXAMPLE
+    crypto@300000 {
+        compatible = "fsl,sec-v4.0";
+        #address-cells = <1>;
+        #size-cells = <1>;
+        reg = <0x300000 0x10000>;
+        ranges = <0 0x300000 0x10000>;
+        interrupt-parent = <&mpic>;
+        interrupts = <92 2>;
+    };
+
+=====================================================================
+Job Ring (JR) Node
+
+    Child of the crypto node that defines a data processing interface
+    to SEC 4 across the peripheral bus for purposes of processing
+    cryptographic descriptors.  The specified address
+    range can be made visible to one (or more) cores.
+    The interrupt defined for this node is controlled within
+    the address range of this node.
+
+   - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-job-ring"
+
+   - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: Specifies two JR parameters: an offset from
+          the parent physical address and the length of the JR registers.
+
+   - fsl,liodn
+      Usage: optional-but-recommended
+      Value type: <prop-encoded-array>
+      Definition:
+          Specifies the LIODN to be used in conjunction with
+          the ppid-to-liodn table that specifies the PPID to LIODN mapping.
+          Needed if the PAMU is used.  The value is a 12-bit
+          LIODN ID for this JR.  This property is
+          normally set by boot firmware.
+
+   - interrupts
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: Specifies the interrupts generated by this
+          device.  The value of the interrupts property
+          consists of one interrupt specifier.  The format
+          of the specifier is defined by the binding document
+          describing the node's interrupt parent.
+
+   - interrupt-parent
+      Usage: (required if interrupt property is defined)
+      Value type: <phandle>
+      Definition: A single <phandle> value that points
+          to the interrupt parent to which the child domain
+          is being mapped.
+
+EXAMPLE
+    jr@1000 {
+        compatible = "fsl,sec-v4.0-job-ring";
+        reg = <0x1000 0x1000>;
+        fsl,liodn = <0x081>;
+        interrupt-parent = <&mpic>;
+        interrupts = <88 2>;
+    };
+
+
+=====================================================================
+Run Time Integrity Check (RTIC) Node
+
+    Child node of the crypto node.  Defines a register space that
+    contains up to 5 sets of addresses and their lengths (sizes) that
+    will be checked at run time.  After an initial hash result is
+    calculated, these addresses are checked by HW to monitor any
+    change.  If any memory is modified, a Security Violation is
+    triggered (see SNVS definition).
+
+
+   - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-rtic".
+
+   - #address-cells
+      Usage: required
+      Value type: <u32>
+      Definition: A standard property.  Defines the number of cells
+          for representing physical addresses in child nodes.  Must
+          have a value of 1.
+
+   - #size-cells
+      Usage: required
+      Value type: <u32>
+      Definition: A standard property.  Defines the number of cells
+          for representing the size of physical addresses in
+          child nodes.  Must have a value of 1.
+
+   - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies two parameters:
+          an offset from the parent physical address and the length
+          of the SEC4 registers.
+
+   - ranges
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies the physical address
+          range of the SEC 4 register space (SNVS not included).  A
+          triplet that includes the child address, parent address, &
+          length.
+
+EXAMPLE
+    rtic@6000 {
+        compatible = "fsl,sec-v4.0-rtic";
+        #address-cells = <1>;
+        #size-cells = <1>;
+        reg = <0x6000 0x100>;
+        ranges = <0x0 0x6100 0xe00>;
+    };
+
+=====================================================================
+Run Time Integrity Check (RTIC) Memory Node
+    A child node that defines individual RTIC memory regions that are used to
+    perform run-time integrity checks of memory areas that should not be
+    modified.  The node defines a register that contains the memory address &
+    length (combined) and a second register that contains the hash result
+    in big endian format.
+
+   - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-rtic-memory".
+
+   - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies two parameters:
+          an offset from the parent physical address and the length:
+
+          1. The location of the RTIC memory address & length registers.
+          2. The location of the RTIC hash result.
+
+   - fsl,rtic-region
+      Usage: optional-but-recommended
+      Value type: <prop-encoded-array>
+      Definition:
+          Specifies the HW address (36 bit address) for this region
+          followed by the length of the HW partition to be checked;
+          the address is represented as a 64 bit quantity followed
+          by a 32 bit length.
+
+   - fsl,liodn
+      Usage: optional-but-recommended
+      Value type: <prop-encoded-array>
+      Definition:
+          Specifies the LIODN to be used in conjunction with
+          the ppid-to-liodn table that specifies the PPID to LIODN
+          mapping.  Needed if the PAMU is used.  The value is a 12-bit
+          LIODN ID for this RTIC memory region.  This
+          property is normally set by boot firmware.
+
+EXAMPLE
+    rtic-a@0 {
+        compatible = "fsl,sec-v4.0-rtic-memory";
+        reg = <0x00 0x20 0x100 0x80>;
+        fsl,liodn = <0x03c>;
+        fsl,rtic-region = <0x12345678 0x12345678 0x12345678>;
+    };
+
+=====================================================================
+Secure Non-Volatile Storage (SNVS) Node
+
+    Node defines address range and the associated
+    interrupt for the SNVS function.  This function
+    monitors security state information & reports
+    security violations.
+
+   - compatible
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-mon".
+
+   - reg
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: A standard property.  Specifies the physical
+          address and length of the SEC4 configuration
+          registers.
+
+   - interrupts
+      Usage: required
+      Value type: <prop-encoded-array>
+      Definition: Specifies the interrupts generated by this
+          device.  The value of the interrupts property
+          consists of one interrupt specifier.  The format
+          of the specifier is defined by the binding document
+          describing the node's interrupt parent.
+
+   - interrupt-parent
+      Usage: (required if interrupt property is defined)
+      Value type: <phandle>
+      Definition: A single <phandle> value that points
+          to the interrupt parent to which the child domain
+          is being mapped.
+
+EXAMPLE
+    sec_mon@314000 {
+        compatible = "fsl,sec-v4.0-mon";
+        reg = <0x314000 0x1000>;
+        interrupt-parent = <&mpic>;
+        interrupts = <93 2>;
+    };
+
+=====================================================================
+FULL EXAMPLE
+
+    crypto: crypto@300000 {
+        compatible = "fsl,sec-v4.0";
+        #address-cells = <1>;
+        #size-cells = <1>;
+        reg = <0x300000 0x10000>;
+        ranges = <0 0x300000 0x10000>;
+        interrupt-parent = <&mpic>;
+        interrupts = <92 2>;
+
+        sec_jr0: jr@1000 {
+            compatible = "fsl,sec-v4.0-job-ring";
+            reg = <0x1000 0x1000>;
+            interrupt-parent = <&mpic>;
+            interrupts = <88 2>;
+        };
+
+        sec_jr1: jr@2000 {
+            compatible = "fsl,sec-v4.0-job-ring";
+            reg = <0x2000 0x1000>;
+            interrupt-parent = <&mpic>;
+            interrupts = <89 2>;
+        };
+
+        sec_jr2: jr@3000 {
+            compatible = "fsl,sec-v4.0-job-ring";
+            reg = <0x3000 0x1000>;
+            interrupt-parent = <&mpic>;
+            interrupts = <90 2>;
+        };
+
+        sec_jr3: jr@4000 {
+            compatible = "fsl,sec-v4.0-job-ring";
+            reg = <0x4000 0x1000>;
+            interrupt-parent = <&mpic>;
+            interrupts = <91 2>;
+        };
+
+        rtic@6000 {
+            compatible = "fsl,sec-v4.0-rtic";
+            #address-cells = <1>;
+            #size-cells = <1>;
+            reg = <0x6000 0x100>;
+            ranges = <0x0 0x6100 0xe00>;
+
+            rtic_a: rtic-a@0 {
+                compatible = "fsl,sec-v4.0-rtic-memory";
+                reg = <0x00 0x20 0x100 0x80>;
+            };
+
+            rtic_b: rtic-b@20 {
+                compatible = "fsl,sec-v4.0-rtic-memory";
+                reg = <0x20 0x20 0x200 0x80>;
+            };
+
+            rtic_c: rtic-c@40 {
+                compatible = "fsl,sec-v4.0-rtic-memory";
+                reg = <0x40 0x20 0x300 0x80>;
+            };
+
+            rtic_d: rtic-d@60 {
+                compatible = "fsl,sec-v4.0-rtic-memory";
+                reg = <0x60 0x20 0x500 0x80>;
+            };
+        };
+    };
+
+    sec_mon: sec_mon@314000 {
+        compatible = "fsl,sec-v4.0-mon";
+        reg = <0x314000 0x1000>;
+        interrupt-parent = <&mpic>;
+        interrupts = <93 2>;
+    };
+
+=====================================================================
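
For orientation, here is a minimal, hypothetical sketch (not part of this
patch; the driver and symbol names are invented for illustration) of how a
Linux platform driver could bind to the "fsl,sec-v4.0" node described by this
binding and enumerate its job-ring children:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int sec4_sketch_probe(struct platform_device *pdev)
{
	struct device_node *child;
	int nr_jr = 0;

	/* Count the job-ring children declared under the crypto node. */
	for_each_child_of_node(pdev->dev.of_node, child)
		if (of_device_is_compatible(child, "fsl,sec-v4.0-job-ring"))
			nr_jr++;

	dev_info(&pdev->dev, "found %d job ring(s)\n", nr_jr);
	return 0;
}

static const struct of_device_id sec4_sketch_match[] = {
	{ .compatible = "fsl,sec-v4.0" },
	{ }
};

static struct platform_driver sec4_sketch_driver = {
	.driver = {
		.name = "sec4-sketch",
		.of_match_table = sec4_sketch_match,
	},
	.probe = sec4_sketch_probe,
};
module_platform_driver(sec4_sketch_driver);
MODULE_LICENSE("GPL");

Matching on the parent compatible and walking the compatible children mirrors
the node/child structure the binding defines above.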
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 5b7fc29dd6cf..927f94d16e9b 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -1,7 +1,7 @@
 /*
  * P4080DS Device Tree Source
  *
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -33,6 +33,17 @@
 		dma1 = &dma1;
 		sdhc = &sdhc;
 
+		crypto = &crypto;
+		sec_jr0 = &sec_jr0;
+		sec_jr1 = &sec_jr1;
+		sec_jr2 = &sec_jr2;
+		sec_jr3 = &sec_jr3;
+		rtic_a = &rtic_a;
+		rtic_b = &rtic_b;
+		rtic_c = &rtic_c;
+		rtic_d = &rtic_d;
+		sec_mon = &sec_mon;
+
 		rio0 = &rapidio0;
 	};
 
@@ -410,6 +421,79 @@
 			dr_mode = "host";
 			phy_type = "ulpi";
 		};
+
+		crypto: crypto@300000 {
+			compatible = "fsl,sec-v4.0";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x300000 0x10000>;
+			ranges = <0 0x300000 0x10000>;
+			interrupt-parent = <&mpic>;
+			interrupts = <92 2>;
+
+			sec_jr0: jr@1000 {
+				compatible = "fsl,sec-v4.0-job-ring";
+				reg = <0x1000 0x1000>;
+				interrupt-parent = <&mpic>;
+				interrupts = <88 2>;
+			};
+
+			sec_jr1: jr@2000 {
+				compatible = "fsl,sec-v4.0-job-ring";
+				reg = <0x2000 0x1000>;
+				interrupt-parent = <&mpic>;
+				interrupts = <89 2>;
+			};
+
+			sec_jr2: jr@3000 {
+				compatible = "fsl,sec-v4.0-job-ring";
+				reg = <0x3000 0x1000>;
+				interrupt-parent = <&mpic>;
+				interrupts = <90 2>;
+			};
+
+			sec_jr3: jr@4000 {
+				compatible = "fsl,sec-v4.0-job-ring";
+				reg = <0x4000 0x1000>;
+				interrupt-parent = <&mpic>;
+				interrupts = <91 2>;
+			};
+
+			rtic@6000 {
+				compatible = "fsl,sec-v4.0-rtic";
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x6000 0x100>;
+				ranges = <0x0 0x6100 0xe00>;
+
+				rtic_a: rtic-a@0 {
+					compatible = "fsl,sec-v4.0-rtic-memory";
+					reg = <0x00 0x20 0x100 0x80>;
+				};
+
+				rtic_b: rtic-b@20 {
+					compatible = "fsl,sec-v4.0-rtic-memory";
+					reg = <0x20 0x20 0x200 0x80>;
+				};
+
+				rtic_c: rtic-c@40 {
+					compatible = "fsl,sec-v4.0-rtic-memory";
+					reg = <0x40 0x20 0x300 0x80>;
+				};
+
+				rtic_d: rtic-d@60 {
+					compatible = "fsl,sec-v4.0-rtic-memory";
+					reg = <0x60 0x20 0x500 0x80>;
+				};
+			};
+		};
+
+		sec_mon: sec_mon@314000 {
+			compatible = "fsl,sec-v4.0-mon";
+			reg = <0x314000 0x1000>;
+			interrupt-parent = <&mpic>;
+			interrupts = <93 2>;
+		};
 	};
 
 	rapidio0: rapidio@ffe0c0000 {
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 1cf81d77c5a5..7f0b7cda6259 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
 obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
+obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
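
The new ghash_s390.o object registers a "ghash" shash implementation that the
generic crypto API selects by priority. As a rough, hedged sketch of how such
an algorithm is reached from other kernel code (written against today's shash
helpers; SHASH_DESC_ON_STACK postdates this patch and is used only for
brevity):

#include <crypto/hash.h>
#include <linux/err.h>

static int ghash_digest_demo(const u8 hkey[16], const u8 *data,
			     unsigned int len, u8 out[16])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("ghash", 0, 0);  /* picks ghash-s390 if present */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, hkey, 16); /* H, the GHASH subkey */
	if (!ret) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, data, len, out);
	}
	crypto_free_shash(tfm);
	return ret;
}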
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 58f46734465f..a9ce135893f8 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -31,7 +31,8 @@
 #define AES_KEYLEN_192	2
 #define AES_KEYLEN_256	4
 
-static char keylen_flag = 0;
+static u8 *ctrblk;
+static char keylen_flag;
 
 struct s390_aes_ctx {
 	u8 iv[AES_BLOCK_SIZE];
@@ -45,6 +46,24 @@ struct s390_aes_ctx {
 	} fallback;
 };
 
+struct pcc_param {
+	u8 key[32];
+	u8 tweak[16];
+	u8 block[16];
+	u8 bit[16];
+	u8 xts[16];
+};
+
+struct s390_xts_ctx {
+	u8 key[32];
+	u8 xts_param[16];
+	struct pcc_param pcc;
+	long enc;
+	long dec;
+	int key_len;
+	struct crypto_blkcipher *fallback;
+};
+
 /*
  * Check if the key_len is supported by the HW.
  * Returns 0 if it is, a positive number if it is not and software fallback is
@@ -504,15 +523,337 @@ static struct crypto_alg cbc_aes_alg = {
 	}
 };
 
+static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
+			       unsigned int len)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+	unsigned int ret;
+
+	xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
+			CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
+				CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int xts_fallback_decrypt(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_blkcipher *tfm;
+	unsigned int ret;
+
+	tfm = desc->tfm;
+	desc->tfm = xts_ctx->fallback;
+
+	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+
+static int xts_fallback_encrypt(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_blkcipher *tfm;
+	unsigned int ret;
+
+	tfm = desc->tfm;
+	desc->tfm = xts_ctx->fallback;
+
+	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+
+static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			   unsigned int key_len)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
+
+	switch (key_len) {
+	case 32:
+		xts_ctx->enc = KM_XTS_128_ENCRYPT;
+		xts_ctx->dec = KM_XTS_128_DECRYPT;
+		memcpy(xts_ctx->key + 16, in_key, 16);
+		memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+		break;
+	case 48:
+		xts_ctx->enc = 0;
+		xts_ctx->dec = 0;
+		xts_fallback_setkey(tfm, in_key, key_len);
+		break;
+	case 64:
+		xts_ctx->enc = KM_XTS_256_ENCRYPT;
+		xts_ctx->dec = KM_XTS_256_DECRYPT;
+		memcpy(xts_ctx->key, in_key, 32);
+		memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+		break;
+	default:
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+	xts_ctx->key_len = key_len;
+	return 0;
+}
+
+static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+			 struct s390_xts_ctx *xts_ctx,
+			 struct blkcipher_walk *walk)
+{
+	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
+	int ret = blkcipher_walk_virt(desc, walk);
+	unsigned int nbytes = walk->nbytes;
+	unsigned int n;
+	u8 *in, *out;
+	void *param;
+
+	if (!nbytes)
+		goto out;
+
+	memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
+	memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
+	memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
+	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
+	param = xts_ctx->pcc.key + offset;
+	ret = crypt_s390_pcc(func, param);
+	BUG_ON(ret < 0);
+
+	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
+	param = xts_ctx->key + offset;
+	do {
+		/* only use complete blocks */
+		n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		out = walk->dst.virt.addr;
+		in = walk->src.virt.addr;
+
+		ret = crypt_s390_km(func, param, out, in, n);
+		BUG_ON(ret < 0 || ret != n);
+
+		nbytes &= AES_BLOCK_SIZE - 1;
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	} while ((nbytes = walk->nbytes));
+out:
+	return ret;
+}
+
+static int xts_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	if (unlikely(xts_ctx->key_len == 48))
+		return xts_fallback_encrypt(desc, dst, src, nbytes);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+}
+
+static int xts_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	if (unlikely(xts_ctx->key_len == 48))
+		return xts_fallback_decrypt(desc, dst, src, nbytes);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+}
+
+static int xts_fallback_init(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+	xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(xts_ctx->fallback)) {
+		pr_err("Allocating XTS fallback algorithm %s failed\n",
+		       name);
+		return PTR_ERR(xts_ctx->fallback);
+	}
+	return 0;
+}
+
+static void xts_fallback_exit(struct crypto_tfm *tfm)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(xts_ctx->fallback);
+	xts_ctx->fallback = NULL;
+}
+
+static struct crypto_alg xts_aes_alg = {
+	.cra_name = "xts(aes)",
+	.cra_driver_name = "xts-aes-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+		     CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct s390_xts_ctx),
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(xts_aes_alg.cra_list),
+	.cra_init = xts_fallback_init,
+	.cra_exit = xts_fallback_exit,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = xts_aes_set_key,
+			.encrypt = xts_aes_encrypt,
+			.decrypt = xts_aes_decrypt,
+		}
+	}
+};
+
+static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			   unsigned int key_len)
+{
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+	switch (key_len) {
+	case 16:
+		sctx->enc = KMCTR_AES_128_ENCRYPT;
+		sctx->dec = KMCTR_AES_128_DECRYPT;
+		break;
+	case 24:
+		sctx->enc = KMCTR_AES_192_ENCRYPT;
+		sctx->dec = KMCTR_AES_192_DECRYPT;
+		break;
+	case 32:
+		sctx->enc = KMCTR_AES_256_ENCRYPT;
+		sctx->dec = KMCTR_AES_256_DECRYPT;
+		break;
+	}
+
+	return aes_set_key(tfm, in_key, key_len);
+}
+
+static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+{
+	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+	unsigned int i, n, nbytes;
+	u8 buf[AES_BLOCK_SIZE];
+	u8 *out, *in;
+
+	if (!walk->nbytes)
+		return ret;
+
+	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+		out = walk->dst.virt.addr;
+		in = walk->src.virt.addr;
+		while (nbytes >= AES_BLOCK_SIZE) {
+			/* only use complete blocks, max. PAGE_SIZE */
+			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+			    nbytes & ~(AES_BLOCK_SIZE - 1);
+			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+				       AES_BLOCK_SIZE);
+				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+			}
+			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
+			BUG_ON(ret < 0 || ret != n);
+			if (n > AES_BLOCK_SIZE)
+				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+				       AES_BLOCK_SIZE);
+			crypto_inc(ctrblk, AES_BLOCK_SIZE);
+			out += n;
+			in += n;
+			nbytes -= n;
+		}
+		ret = blkcipher_walk_done(desc, walk, nbytes);
+	}
+	/*
+	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
+	 */
+	if (nbytes) {
+		out = walk->dst.virt.addr;
+		in = walk->src.virt.addr;
+		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+				       AES_BLOCK_SIZE, ctrblk);
+		BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+		memcpy(out, buf, nbytes);
+		crypto_inc(ctrblk, AES_BLOCK_SIZE);
+		ret = blkcipher_walk_done(desc, walk, 0);
+	}
+	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+	return ret;
+}
+
+static int ctr_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
+}
+
+static int ctr_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
+}
+
+static struct crypto_alg ctr_aes_alg = {
+	.cra_name = "ctr(aes)",
+	.cra_driver_name = "ctr-aes-s390",
+	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct s390_aes_ctx),
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(ctr_aes_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = ctr_aes_set_key,
+			.encrypt = ctr_aes_encrypt,
+			.decrypt = ctr_aes_decrypt,
+		}
+	}
+};
+
 static int __init aes_s390_init(void)
 {
 	int ret;
 
-	if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
+	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
 		keylen_flag |= AES_KEYLEN_128;
-	if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
+	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
 		keylen_flag |= AES_KEYLEN_192;
-	if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
+	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
 		keylen_flag |= AES_KEYLEN_256;
 
 	if (!keylen_flag)
@@ -535,9 +876,40 @@ static int __init aes_s390_init(void)
 	if (ret)
 		goto cbc_aes_err;
 
+	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+		ret = crypto_register_alg(&xts_aes_alg);
+		if (ret)
+			goto xts_aes_err;
+	}
+
+	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
+			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
+		if (!ctrblk) {
+			ret = -ENOMEM;
+			goto ctr_aes_err;
+		}
+		ret = crypto_register_alg(&ctr_aes_alg);
+		if (ret) {
+			free_page((unsigned long) ctrblk);
+			goto ctr_aes_err;
+		}
+	}
+
 out:
 	return ret;
 
+ctr_aes_err:
+	crypto_unregister_alg(&xts_aes_alg);
+xts_aes_err:
+	crypto_unregister_alg(&cbc_aes_alg);
 cbc_aes_err:
 	crypto_unregister_alg(&ecb_aes_alg);
 ecb_aes_err:
@@ -548,6 +920,9 @@ aes_err:
 
 static void __exit aes_s390_fini(void)
 {
+	crypto_unregister_alg(&ctr_aes_alg);
+	free_page((unsigned long) ctrblk);
+	crypto_unregister_alg(&xts_aes_alg);
 	crypto_unregister_alg(&cbc_aes_alg);
 	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
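
The CTR path above batches work by pre-filling a page-sized buffer (ctrblk)
with successive counter values, so one KMCTR invocation can cover many AES
blocks at once. A standalone, user-space sketch of that counter expansion
(plain C, independent of the kernel; ctr_inc() mimics what crypto_inc() does):

#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* Big-endian increment with carry, as the kernel's crypto_inc() does. */
static void ctr_inc(uint8_t *ctr, unsigned int size)
{
	int i;

	for (i = size - 1; i >= 0; i--)
		if (++ctr[i] != 0)
			break;
}

/* Expand an initial counter into n consecutive counter blocks. */
static void fill_ctrblk(uint8_t *ctrblk, const uint8_t *iv, unsigned int n)
{
	unsigned int i;

	memcpy(ctrblk, iv, AES_BLOCK_SIZE);
	for (i = 1; i < n; i++) {
		memcpy(ctrblk + i * AES_BLOCK_SIZE,
		       ctrblk + (i - 1) * AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctr_inc(ctrblk + i * AES_BLOCK_SIZE, AES_BLOCK_SIZE);
	}
}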
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 7ee9a1b4ad9f..49676771bd66 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -24,13 +24,18 @@
 #define CRYPT_S390_PRIORITY 300
 #define CRYPT_S390_COMPOSITE_PRIORITY 400
 
+#define CRYPT_S390_MSA	0x1
+#define CRYPT_S390_MSA3	0x2
+#define CRYPT_S390_MSA4	0x4
+
 /* s390 cryptographic operations */
 enum crypt_s390_operations {
 	CRYPT_S390_KM   = 0x0100,
 	CRYPT_S390_KMC  = 0x0200,
 	CRYPT_S390_KIMD = 0x0300,
 	CRYPT_S390_KLMD = 0x0400,
-	CRYPT_S390_KMAC = 0x0500
+	CRYPT_S390_KMAC = 0x0500,
+	CRYPT_S390_KMCTR = 0x0600
 };
 
 /*
@@ -51,6 +56,10 @@ enum crypt_s390_km_func {
 	KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80,
 	KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14,
 	KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
+	KM_XTS_128_ENCRYPT = CRYPT_S390_KM | 0x32,
+	KM_XTS_128_DECRYPT = CRYPT_S390_KM | 0x32 | 0x80,
+	KM_XTS_256_ENCRYPT = CRYPT_S390_KM | 0x34,
+	KM_XTS_256_DECRYPT = CRYPT_S390_KM | 0x34 | 0x80,
 };
 
 /*
@@ -75,6 +84,26 @@ enum crypt_s390_kmc_func {
 };
 
 /*
+ * function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
+ * instruction
+ */
+enum crypt_s390_kmctr_func {
+	KMCTR_QUERY            = CRYPT_S390_KMCTR | 0x0,
+	KMCTR_DEA_ENCRYPT      = CRYPT_S390_KMCTR | 0x1,
+	KMCTR_DEA_DECRYPT      = CRYPT_S390_KMCTR | 0x1 | 0x80,
+	KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
+	KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
+	KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
+	KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
+	KMCTR_AES_128_ENCRYPT  = CRYPT_S390_KMCTR | 0x12,
+	KMCTR_AES_128_DECRYPT  = CRYPT_S390_KMCTR | 0x12 | 0x80,
+	KMCTR_AES_192_ENCRYPT  = CRYPT_S390_KMCTR | 0x13,
+	KMCTR_AES_192_DECRYPT  = CRYPT_S390_KMCTR | 0x13 | 0x80,
+	KMCTR_AES_256_ENCRYPT  = CRYPT_S390_KMCTR | 0x14,
+	KMCTR_AES_256_DECRYPT  = CRYPT_S390_KMCTR | 0x14 | 0x80,
+};
+
+/*
  * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
  * instruction
  */
@@ -83,6 +112,7 @@ enum crypt_s390_kimd_func {
 	KIMD_SHA_1   = CRYPT_S390_KIMD | 1,
 	KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
 	KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
+	KIMD_GHASH   = CRYPT_S390_KIMD | 65,
 };
 
 /*
@@ -284,6 +314,45 @@ static inline int crypt_s390_kmac(long func, void *param,
 }
 
 /**
+ * crypt_s390_kmctr:
+ * @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @counter: address of counter value
+ *
+ * Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of processed
+ * bytes for encryption/decryption funcs
+ */
+static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
+				   const u8 *src, long src_len, u8 *counter)
+{
+	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+	register void *__param asm("1") = param;
+	register const u8 *__src asm("2") = src;
+	register long __src_len asm("3") = src_len;
+	register u8 *__dest asm("4") = dest;
+	register u8 *__ctr asm("6") = counter;
+	int ret = -1;
+
+	asm volatile(
+		"0:	.insn	rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */
+		"1:	brc	1,0b \n" /* handle partial completion */
+		"	la	%0,0\n"
+		"2:\n"
+		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+		: "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
+		  "+a" (__ctr)
+		: "d" (__func), "a" (__param) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
+}
+
+/**
  * crypt_s390_func_available:
  * @func: the function code of the specific function; 0 if op in general
  *
@@ -291,13 +360,17 @@ static inline int crypt_s390_kmac(long func, void *param,
 * 
 * Returns 1 if func available; 0 if func or op in general not available
 */
-static inline int crypt_s390_func_available(int func)
+static inline int crypt_s390_func_available(int func,
+					    unsigned int facility_mask)
 {
 	unsigned char status[16];
 	int ret;
 
-	/* check if CPACF facility (bit 17) is available */
-	if (!test_facility(17))
+	if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
+		return 0;
+	if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+		return 0;
+	if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
 		return 0;
 
 	switch (func & CRYPT_S390_OP_MASK) {
@@ -316,6 +389,10 @@ static inline int crypt_s390_func_available(int func)
 	case CRYPT_S390_KMAC:
 		ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
 		break;
+	case CRYPT_S390_KMCTR:
+		ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0,
+				       NULL);
+		break;
 	default:
 		return 0;
 	}
@@ -326,4 +403,31 @@ static inline int crypt_s390_func_available(int func)
 	return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
 }
 
+/**
+ * crypt_s390_pcc:
+ * @func: the function code passed to KM; see crypt_s390_km_func
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for success.
+ */
+static inline int crypt_s390_pcc(long func, void *param)
+{
+	register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
+	register void *__param asm("1") = param;
+	int ret = -1;
+
+	asm volatile(
+		"0:	.insn	rre,0xb92c0000,0,0 \n" /* PCC opcode */
+		"1:	brc	1,0b \n" /* handle partial completion */
+		"	la	%0,0\n"
+		"2:\n"
+		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+		: "+d" (ret)
+		: "d" (__func), "a" (__param) : "cc", "memory");
+	return ret;
+}
+
+
 #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
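
The query path at the end of crypt_s390_func_available() reads a 128-bit
status block in which bit N (numbered from the most significant bit of byte
0) reports that function code N is installed. A small standalone illustration
of that bit test (assumption: function codes occupy the low 7 bits, with 0x80
reserved for the decrypt modifier, as in the enums above):

#include <stdint.h>

/*
 * status: 16-byte block returned by a query function (KM_QUERY,
 * KMCTR_QUERY, ...). Bit 0 of byte 0 is the MSB, so function code N
 * maps to byte N/8 and mask 0x80 >> (N % 8).
 */
static int func_installed(const uint8_t status[16], unsigned int func)
{
	func &= 0x7f;	/* strip the 0x80 decrypt modifier */
	return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}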
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
deleted file mode 100644
index 5706af266442..000000000000
--- a/arch/s390/crypto/des_check_key.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Function for checking keys for the DES and Tripple DES Encryption
- * algorithms.
- *
- * Originally released as descore by Dana L. How <how@isl.stanford.edu>.
- * Modified by Raimar Falke <rf13@inf.tu-dresden.de> for the Linux-Kernel.
- * Derived from Cryptoapi and Nettle implementations, adapted for in-place
- * scatterlist interface.  Changed LGPL to GPL per section 3 of the LGPL.
- *
- * s390 Version:
- *   Copyright IBM Corp. 2003
- *   Author(s): Thomas Spatzier
- *		Jan Glauber (jan.glauber@de.ibm.com)
- *
- * Derived from "crypto/des.c"
- *   Copyright (c) 1992 Dana L. How.
- *   Copyright (c) Raimar Falke <rf13@inf.tu-dresden.de>
- *   Copyright (c) Gisle Sflensminde <gisle@ii.uib.no>
- *   Copyright (C) 2001 Niels Mvller.
- *   Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include "crypto_des.h"
-
-#define ROR(d,c,o)	((d) = (d) >> (c) | (d) << (o))
-
-static const u8 parity[] = {
-	8,1,0,8,0,8,8,0,0,8,8,0,8,0,2,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,3,
-	0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
-	0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
-	8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
-	0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
-	8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
-	8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
-	4,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,5,0,8,0,8,8,0,0,8,8,0,8,0,6,8,
-};
-
-/*
- * RFC2451: Weak key checks SHOULD be performed.
- */
-int
-crypto_des_check_key(const u8 *key, unsigned int keylen, u32 *flags)
-{
-	u32 n, w;
-
-	n  = parity[key[0]]; n <<= 4;
-	n |= parity[key[1]]; n <<= 4;
-	n |= parity[key[2]]; n <<= 4;
-	n |= parity[key[3]]; n <<= 4;
-	n |= parity[key[4]]; n <<= 4;
-	n |= parity[key[5]]; n <<= 4;
-	n |= parity[key[6]]; n <<= 4;
-	n |= parity[key[7]];
-	w  = 0x88888888L;
-
-	if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY)
-	    && !((n - (w >> 3)) & w)) {	/* 1 in 10^10 keys passes this test */
-		if (n < 0x41415151) {
-			if (n < 0x31312121) {
-				if (n < 0x14141515) {
-					/* 01 01 01 01 01 01 01 01 */
-					if (n == 0x11111111) goto weak;
-					/* 01 1F 01 1F 01 0E 01 0E */
-					if (n == 0x13131212) goto weak;
-				} else {
-					/* 01 E0 01 E0 01 F1 01 F1 */
-					if (n == 0x14141515) goto weak;
-					/* 01 FE 01 FE 01 FE 01 FE */
-					if (n == 0x16161616) goto weak;
-				}
-			} else {
-				if (n < 0x34342525) {
-					/* 1F 01 1F 01 0E 01 0E 01 */
-					if (n == 0x31312121) goto weak;
-					/* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
-					if (n == 0x33332222) goto weak;
-				} else {
-					/* 1F E0 1F E0 0E F1 0E F1 */
-					if (n == 0x34342525) goto weak;
-					/* 1F FE 1F FE 0E FE 0E FE */
-					if (n == 0x36362626) goto weak;
-				}
-			}
-		} else {
-			if (n < 0x61616161) {
-				if (n < 0x44445555) {
-					/* E0 01 E0 01 F1 01 F1 01 */
-					if (n == 0x41415151) goto weak;
-					/* E0 1F E0 1F F1 0E F1 0E */
-					if (n == 0x43435252) goto weak;
-				} else {
-					/* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
-					if (n == 0x44445555) goto weak;
-					/* E0 FE E0 FE F1 FE F1 FE */
-					if (n == 0x46465656) goto weak;
-				}
-			} else {
-				if (n < 0x64646565) {
-					/* FE 01 FE 01 FE 01 FE 01 */
-					if (n == 0x61616161) goto weak;
-					/* FE 1F FE 1F FE 0E FE 0E */
-					if (n == 0x63636262) goto weak;
-				} else {
-					/* FE E0 FE E0 FE F1 FE F1 */
-					if (n == 0x64646565) goto weak;
-					/* FE FE FE FE FE FE FE FE */
-					if (n == 0x66666666) goto weak;
-				}
-			}
-		}
-	}
-	return 0;
-weak:
-	*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-	return -EINVAL;
-}
-
-EXPORT_SYMBOL(crypto_des_check_key);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Key Check function for DES & DES3 Cipher Algorithms");
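
With this file gone, weak-key detection falls to the generic DES code:
des_setkey() in des_s390.c (below) expands the key with des_ekey() from
<crypto/des.h>, which returns 0 for an RFC 2451 weak key. A minimal sketch of
that check in isolation:

#include <crypto/des.h>	/* DES_KEY_SIZE, DES_EXPKEY_WORDS, des_ekey() */
#include <linux/types.h>

static int des_key_is_weak(const u8 key[DES_KEY_SIZE])
{
	u32 tmp[DES_EXPKEY_WORDS];

	/* des_ekey() returns 0 when the key is weak (RFC 2451). */
	return des_ekey(tmp, key) == 0;
}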
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index cc5420118393..a52bfd124d86 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,7 +3,7 @@
  *
  * s390 implementation of the DES Cipher Algorithm.
  *
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003,2011
  * Author(s): Thomas Spatzier
  *	      Jan Glauber (jan.glauber@de.ibm.com)
  *
@@ -22,22 +22,19 @@
 
 #include "crypt_s390.h"
 
-#define DES3_192_KEY_SIZE	(3 * DES_KEY_SIZE)
+#define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)
 
-struct crypt_s390_des_ctx {
-	u8 iv[DES_BLOCK_SIZE];
-	u8 key[DES_KEY_SIZE];
-};
+static u8 *ctrblk;
 
-struct crypt_s390_des3_192_ctx {
+struct s390_des_ctx {
 	u8 iv[DES_BLOCK_SIZE];
-	u8 key[DES3_192_KEY_SIZE];
+	u8 key[DES3_KEY_SIZE];
 };
 
 static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
-		      unsigned int keylen)
+		      unsigned int key_len)
 {
-	struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 	u32 *flags = &tfm->crt_flags;
 	u32 tmp[DES_EXPKEY_WORDS];
 
@@ -47,22 +44,22 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
 		return -EINVAL;
 	}
 
-	memcpy(dctx->key, key, keylen);
+	memcpy(ctx->key, key, key_len);
 	return 0;
 }
 
 static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-	struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
+	crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
 }
 
 static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-	struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
+	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
+	crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
 }
 
 static struct crypto_alg des_alg = {
@@ -71,7 +68,7 @@ static struct crypto_alg des_alg = {
 	.cra_priority = CRYPT_S390_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize = DES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+	.cra_ctxsize = sizeof(struct s390_des_ctx),
 	.cra_module = THIS_MODULE,
 	.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
 	.cra_u = {
@@ -86,7 +83,7 @@ static struct crypto_alg des_alg = {
 };
 
 static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
-			    void *param, struct blkcipher_walk *walk)
+			    u8 *key, struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes;
@@ -97,7 +94,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, param, out, in, n);
+		ret = crypt_s390_km(func, key, out, in, n);
 		BUG_ON((ret < 0) || (ret != n));
 
 		nbytes &= DES_BLOCK_SIZE - 1;
@@ -108,7 +105,7 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 }
 
 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
-			    void *param, struct blkcipher_walk *walk)
+			    u8 *iv, struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes = walk->nbytes;
@@ -116,20 +113,20 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
 	if (!nbytes)
 		goto out;
 
-	memcpy(param, walk->iv, DES_BLOCK_SIZE);
+	memcpy(iv, walk->iv, DES_BLOCK_SIZE);
 	do {
 		/* only use complete blocks */
 		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, param, out, in, n);
+		ret = crypt_s390_kmc(func, iv, out, in, n);
 		BUG_ON((ret < 0) || (ret != n));
 
 		nbytes &= DES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	} while ((nbytes = walk->nbytes));
-	memcpy(walk->iv, param, DES_BLOCK_SIZE);
+	memcpy(walk->iv, iv, DES_BLOCK_SIZE);
 
 out:
 	return ret;
@@ -139,22 +136,22 @@ static int ecb_des_encrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk);
+	return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk);
 }
 
 static int ecb_des_decrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk);
+	return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk);
 }
 
 static struct crypto_alg ecb_des_alg = {
@@ -163,7 +160,7 @@ static struct crypto_alg ecb_des_alg = {
 	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize = DES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+	.cra_ctxsize = sizeof(struct s390_des_ctx),
 	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
@@ -182,22 +179,22 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
@@ -206,7 +203,7 @@ static struct crypto_alg cbc_des_alg = {
 	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize = DES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
+	.cra_ctxsize = sizeof(struct s390_des_ctx),
 	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
@@ -235,10 +232,10 @@ static struct crypto_alg cbc_des_alg = {
  * property.
  *
  */
-static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
-			   unsigned int keylen)
+static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
+		       unsigned int key_len)
 {
-	struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 	u32 *flags = &tfm->crt_flags;
 
 	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
@@ -248,141 +245,276 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
-	memcpy(dctx->key, key, keylen);
+	memcpy(ctx->key, key, key_len);
 	return 0;
 }
 
-static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-	struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
-		      DES_BLOCK_SIZE);
+	crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
 }
 
-static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-	struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
+	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
-		      DES_BLOCK_SIZE);
+	crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
 }
 
-static struct crypto_alg des3_192_alg = {
+static struct crypto_alg des3_alg = {
 	.cra_name = "des3_ede",
 	.cra_driver_name = "des3_ede-s390",
 	.cra_priority = CRYPT_S390_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize = DES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
+	.cra_ctxsize = sizeof(struct s390_des_ctx),
 	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
+	.cra_list = LIST_HEAD_INIT(des3_alg.cra_list),
 	.cra_u = {
 		.cipher = {
-			.cia_min_keysize = DES3_192_KEY_SIZE,
-			.cia_max_keysize = DES3_192_KEY_SIZE,
-			.cia_setkey = des3_192_setkey,
-			.cia_encrypt = des3_192_encrypt,
-			.cia_decrypt = des3_192_decrypt,
+			.cia_min_keysize = DES3_KEY_SIZE,
+			.cia_max_keysize = DES3_KEY_SIZE,
+			.cia_setkey = des3_setkey,
+			.cia_encrypt = des3_encrypt,
+			.cia_decrypt = des3_decrypt,
 		}
 	}
 };
 
-static int ecb_des3_192_encrypt(struct blkcipher_desc *desc,
-				struct scatterlist *dst,
-				struct scatterlist *src, unsigned int nbytes)
+static int ecb_des3_encrypt(struct blkcipher_desc *desc,
+			    struct scatterlist *dst, struct scatterlist *src,
+			    unsigned int nbytes)
 {
-	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk);
+	return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk);
 }
 
-static int ecb_des3_192_decrypt(struct blkcipher_desc *desc,
-				struct scatterlist *dst,
-				struct scatterlist *src, unsigned int nbytes)
+static int ecb_des3_decrypt(struct blkcipher_desc *desc,
+			    struct scatterlist *dst, struct scatterlist *src,
+			    unsigned int nbytes)
 {
-	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk);
+	return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk);
 }
 
-static struct crypto_alg ecb_des3_192_alg = {
+static struct crypto_alg ecb_des3_alg = {
 	.cra_name = "ecb(des3_ede)",
 	.cra_driver_name = "ecb-des3_ede-s390",
 	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
 	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize = DES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
+	.cra_ctxsize = sizeof(struct s390_des_ctx),
 	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_list = LIST_HEAD_INIT(
-					ecb_des3_192_alg.cra_list),
+					ecb_des3_alg.cra_list),
 	.cra_u = {
 		.blkcipher = {
-			.min_keysize = DES3_192_KEY_SIZE,
-			.max_keysize = DES3_192_KEY_SIZE,
-			.setkey = des3_192_setkey,
-			.encrypt = ecb_des3_192_encrypt,
-			.decrypt = ecb_des3_192_decrypt,
+			.min_keysize = DES3_KEY_SIZE,
+			.max_keysize = DES3_KEY_SIZE,
+			.setkey = des3_setkey,
+			.encrypt = ecb_des3_encrypt,
+			.decrypt = ecb_des3_decrypt,
 		}
 	}
 };
 
-static int cbc_des3_192_encrypt(struct blkcipher_desc *desc,
-				struct scatterlist *dst,
-				struct scatterlist *src, unsigned int nbytes)
+static int cbc_des3_encrypt(struct blkcipher_desc *desc,
+			    struct scatterlist *dst, struct scatterlist *src,
+			    unsigned int nbytes)
 {
-	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
 }
 
-static int cbc_des3_192_decrypt(struct blkcipher_desc *desc,
-				struct scatterlist *dst,
-				struct scatterlist *src, unsigned int nbytes)
+static int cbc_des3_decrypt(struct blkcipher_desc *desc,
+			    struct scatterlist *dst, struct scatterlist *src,
+			    unsigned int nbytes)
 {
-	struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
 }
 
-static struct crypto_alg cbc_des3_192_alg = {
+static struct crypto_alg cbc_des3_alg = {
358 .cra_name = "cbc(des3_ede)", 353 .cra_name = "cbc(des3_ede)",
359 .cra_driver_name = "cbc-des3_ede-s390", 354 .cra_driver_name = "cbc-des3_ede-s390",
360 .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, 355 .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
361 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 356 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
362 .cra_blocksize = DES_BLOCK_SIZE, 357 .cra_blocksize = DES_BLOCK_SIZE,
363 .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx), 358 .cra_ctxsize = sizeof(struct s390_des_ctx),
364 .cra_type = &crypto_blkcipher_type, 359 .cra_type = &crypto_blkcipher_type,
365 .cra_module = THIS_MODULE, 360 .cra_module = THIS_MODULE,
366 .cra_list = LIST_HEAD_INIT( 361 .cra_list = LIST_HEAD_INIT(
367 cbc_des3_192_alg.cra_list), 362 cbc_des3_alg.cra_list),
368 .cra_u = { 363 .cra_u = {
369 .blkcipher = { 364 .blkcipher = {
370 .min_keysize = DES3_192_KEY_SIZE, 365 .min_keysize = DES3_KEY_SIZE,
371 .max_keysize = DES3_192_KEY_SIZE, 366 .max_keysize = DES3_KEY_SIZE,
372 .ivsize = DES_BLOCK_SIZE, 367 .ivsize = DES_BLOCK_SIZE,
373 .setkey = des3_192_setkey, 368 .setkey = des3_setkey,
374 .encrypt = cbc_des3_192_encrypt, 369 .encrypt = cbc_des3_encrypt,
375 .decrypt = cbc_des3_192_decrypt, 370 .decrypt = cbc_des3_decrypt,
376 } 371 }
377 } 372 }
378}; 373};
379 374
380static int des_s390_init(void) 375static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
376 struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
377{
378 int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
379 unsigned int i, n, nbytes;
380 u8 buf[DES_BLOCK_SIZE];
381 u8 *out, *in;
382
383 memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
384 while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
385 out = walk->dst.virt.addr;
386 in = walk->src.virt.addr;
387 while (nbytes >= DES_BLOCK_SIZE) {
388 /* align to block size, max. PAGE_SIZE */
389 n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
390 nbytes & ~(DES_BLOCK_SIZE - 1);
391 for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
392 memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
393 DES_BLOCK_SIZE);
394 crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
395 }
396 ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
397 BUG_ON((ret < 0) || (ret != n));
398 if (n > DES_BLOCK_SIZE)
399 memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
400 DES_BLOCK_SIZE);
401 crypto_inc(ctrblk, DES_BLOCK_SIZE);
402 out += n;
403 in += n;
404 nbytes -= n;
405 }
406 ret = blkcipher_walk_done(desc, walk, nbytes);
407 }
408
409 /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
410 if (nbytes) {
411 out = walk->dst.virt.addr;
412 in = walk->src.virt.addr;
413 ret = crypt_s390_kmctr(func, ctx->key, buf, in,
414 DES_BLOCK_SIZE, ctrblk);
415 BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
416 memcpy(out, buf, nbytes);
417 crypto_inc(ctrblk, DES_BLOCK_SIZE);
418 ret = blkcipher_walk_done(desc, walk, 0);
419 }
420 memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
421 return ret;
422}
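ctr_desall_crypt amortizes the KMCTR calls by pre-filling a page with
consecutive counter blocks, so a single call can cover up to PAGE_SIZE bytes;
crypto_inc() steps the counter as a big-endian integer. A rough user-space
sketch of just that expansion step (ctr_inc stands in for crypto_inc, and the
8-byte block size mirrors DES_BLOCK_SIZE):

    #include <stdint.h>
    #include <string.h>

    #define BLK 8 /* DES_BLOCK_SIZE */

    /* Big-endian increment with carry, as crypto_inc() performs. */
    static void ctr_inc(uint8_t *blk)
    {
            for (int i = BLK - 1; i >= 0; i--)
                    if (++blk[i])
                            break;
    }

    /* Fill buf with n/BLK consecutive counter blocks starting at iv. */
    static void expand_counters(uint8_t *buf, const uint8_t *iv,
                                unsigned int n)
    {
            memcpy(buf, iv, BLK);
            for (unsigned int i = BLK; i < n; i += BLK) {
                    memcpy(buf + i, buf + i - BLK, BLK);
                    ctr_inc(buf + i);
            }
    }

Because CTR turns the block cipher into a stream cipher, the ctr_des_alg and
ctr_des3_alg definitions below advertise cra_blocksize = 1 while keeping a
DES_BLOCK_SIZE IV.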
423
424static int ctr_des_encrypt(struct blkcipher_desc *desc,
425 struct scatterlist *dst, struct scatterlist *src,
426 unsigned int nbytes)
427{
428 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
429 struct blkcipher_walk walk;
430
431 blkcipher_walk_init(&walk, dst, src, nbytes);
432 return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk);
433}
434
435static int ctr_des_decrypt(struct blkcipher_desc *desc,
436 struct scatterlist *dst, struct scatterlist *src,
437 unsigned int nbytes)
438{
439 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
440 struct blkcipher_walk walk;
441
442 blkcipher_walk_init(&walk, dst, src, nbytes);
443 return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk);
444}
445
446static struct crypto_alg ctr_des_alg = {
447 .cra_name = "ctr(des)",
448 .cra_driver_name = "ctr-des-s390",
449 .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
450 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
451 .cra_blocksize = 1,
452 .cra_ctxsize = sizeof(struct s390_des_ctx),
453 .cra_type = &crypto_blkcipher_type,
454 .cra_module = THIS_MODULE,
455 .cra_list = LIST_HEAD_INIT(ctr_des_alg.cra_list),
456 .cra_u = {
457 .blkcipher = {
458 .min_keysize = DES_KEY_SIZE,
459 .max_keysize = DES_KEY_SIZE,
460 .ivsize = DES_BLOCK_SIZE,
461 .setkey = des_setkey,
462 .encrypt = ctr_des_encrypt,
463 .decrypt = ctr_des_decrypt,
464 }
465 }
466};
467
468static int ctr_des3_encrypt(struct blkcipher_desc *desc,
469 struct scatterlist *dst, struct scatterlist *src,
470 unsigned int nbytes)
471{
472 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
473 struct blkcipher_walk walk;
474
475 blkcipher_walk_init(&walk, dst, src, nbytes);
476 return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk);
477}
478
479static int ctr_des3_decrypt(struct blkcipher_desc *desc,
480 struct scatterlist *dst, struct scatterlist *src,
481 unsigned int nbytes)
482{
483 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
484 struct blkcipher_walk walk;
485
486 blkcipher_walk_init(&walk, dst, src, nbytes);
487 return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk);
488}
489
490static struct crypto_alg ctr_des3_alg = {
491 .cra_name = "ctr(des3_ede)",
492 .cra_driver_name = "ctr-des3_ede-s390",
493 .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
494 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
495 .cra_blocksize = 1,
496 .cra_ctxsize = sizeof(struct s390_des_ctx),
497 .cra_type = &crypto_blkcipher_type,
498 .cra_module = THIS_MODULE,
499 .cra_list = LIST_HEAD_INIT(ctr_des3_alg.cra_list),
500 .cra_u = {
501 .blkcipher = {
502 .min_keysize = DES3_KEY_SIZE,
503 .max_keysize = DES3_KEY_SIZE,
504 .ivsize = DES_BLOCK_SIZE,
505 .setkey = des3_setkey,
506 .encrypt = ctr_des3_encrypt,
507 .decrypt = ctr_des3_decrypt,
508 }
509 }
510};
511
512static int __init des_s390_init(void)
381{ 513{
382 int ret; 514 int ret;
383 515
384 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || 516 if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
385 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) 517 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
386 return -EOPNOTSUPP; 518 return -EOPNOTSUPP;
387 519
388 ret = crypto_register_alg(&des_alg); 520 ret = crypto_register_alg(&des_alg);
@@ -394,23 +526,46 @@ static int des_s390_init(void)
394 ret = crypto_register_alg(&cbc_des_alg); 526 ret = crypto_register_alg(&cbc_des_alg);
395 if (ret) 527 if (ret)
396 goto cbc_des_err; 528 goto cbc_des_err;
397 ret = crypto_register_alg(&des3_192_alg); 529 ret = crypto_register_alg(&des3_alg);
398 if (ret) 530 if (ret)
399 goto des3_192_err; 531 goto des3_err;
400 ret = crypto_register_alg(&ecb_des3_192_alg); 532 ret = crypto_register_alg(&ecb_des3_alg);
401 if (ret) 533 if (ret)
402 goto ecb_des3_192_err; 534 goto ecb_des3_err;
403 ret = crypto_register_alg(&cbc_des3_192_alg); 535 ret = crypto_register_alg(&cbc_des3_alg);
404 if (ret) 536 if (ret)
405 goto cbc_des3_192_err; 537 goto cbc_des3_err;
538
539 if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
540 CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
541 crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
542 CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
543 ret = crypto_register_alg(&ctr_des_alg);
544 if (ret)
545 goto ctr_des_err;
546 ret = crypto_register_alg(&ctr_des3_alg);
547 if (ret)
548 goto ctr_des3_err;
549 ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
550 if (!ctrblk) {
551 ret = -ENOMEM;
552 goto ctr_mem_err;
553 }
554 }
406out: 555out:
407 return ret; 556 return ret;
408 557
409cbc_des3_192_err: 558ctr_mem_err:
410 crypto_unregister_alg(&ecb_des3_192_alg); 559 crypto_unregister_alg(&ctr_des3_alg);
411ecb_des3_192_err: 560ctr_des3_err:
412 crypto_unregister_alg(&des3_192_alg); 561 crypto_unregister_alg(&ctr_des_alg);
413des3_192_err: 562ctr_des_err:
563 crypto_unregister_alg(&cbc_des3_alg);
564cbc_des3_err:
565 crypto_unregister_alg(&ecb_des3_alg);
566ecb_des3_err:
567 crypto_unregister_alg(&des3_alg);
568des3_err:
414 crypto_unregister_alg(&cbc_des_alg); 569 crypto_unregister_alg(&cbc_des_alg);
415cbc_des_err: 570cbc_des_err:
416 crypto_unregister_alg(&ecb_des_alg); 571 crypto_unregister_alg(&ecb_des_alg);
@@ -422,9 +577,14 @@ des_err:
422 577
423static void __exit des_s390_exit(void) 578static void __exit des_s390_exit(void)
424{ 579{
425 crypto_unregister_alg(&cbc_des3_192_alg); 580 if (ctrblk) {
426 crypto_unregister_alg(&ecb_des3_192_alg); 581 crypto_unregister_alg(&ctr_des_alg);
427 crypto_unregister_alg(&des3_192_alg); 582 crypto_unregister_alg(&ctr_des3_alg);
583 free_page((unsigned long) ctrblk);
584 }
585 crypto_unregister_alg(&cbc_des3_alg);
586 crypto_unregister_alg(&ecb_des3_alg);
587 crypto_unregister_alg(&des3_alg);
428 crypto_unregister_alg(&cbc_des_alg); 588 crypto_unregister_alg(&cbc_des_alg);
429 crypto_unregister_alg(&ecb_des_alg); 589 crypto_unregister_alg(&ecb_des_alg);
430 crypto_unregister_alg(&des_alg); 590 crypto_unregister_alg(&des_alg);
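The reworked init/exit paths follow the usual kernel unwind idiom: each
successful registration gains a matching error label, and failures fall
through the labels in reverse registration order, so the CTR algorithms and
the ctrblk page are torn down only if they were actually set up. A compilable
toy version of the pattern (all names hypothetical):

    #include <stdio.h>

    /* Hypothetical registration pairs standing in for
     * crypto_register_alg()/crypto_unregister_alg(). */
    static int register_a(void) { return 0; }
    static void unregister_a(void) { }
    static int register_b(void) { return -1; /* simulate a failure */ }

    static int example_init(void)
    {
            int ret;

            ret = register_a();
            if (ret)
                    goto out;
            ret = register_b();
            if (ret)
                    goto err_b;
            return 0;

    err_b:
            unregister_a(); /* unwind in reverse registration order */
    out:
            return ret;
    }

    int main(void)
    {
            printf("init: %d\n", example_init());
            return 0;
    }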
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
new file mode 100644
index 000000000000..b1bd170f24b1
--- /dev/null
+++ b/arch/s390/crypto/ghash_s390.c
@@ -0,0 +1,162 @@
1/*
2 * Cryptographic API.
3 *
4 * s390 implementation of the GHASH algorithm for GCM (Galois/Counter Mode).
5 *
6 * Copyright IBM Corp. 2011
7 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
8 */
9
10#include <crypto/internal/hash.h>
11#include <linux/module.h>
12
13#include "crypt_s390.h"
14
15#define GHASH_BLOCK_SIZE 16
16#define GHASH_DIGEST_SIZE 16
17
18struct ghash_ctx {
19 u8 icv[16];
20 u8 key[16];
21};
22
23struct ghash_desc_ctx {
24 u8 buffer[GHASH_BLOCK_SIZE];
25 u32 bytes;
26};
27
28static int ghash_init(struct shash_desc *desc)
29{
30 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
31
32 memset(dctx, 0, sizeof(*dctx));
33
34 return 0;
35}
36
37static int ghash_setkey(struct crypto_shash *tfm,
38 const u8 *key, unsigned int keylen)
39{
40 struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
41
42 if (keylen != GHASH_BLOCK_SIZE) {
43 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
44 return -EINVAL;
45 }
46
47 memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
48 memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
49
50 return 0;
51}
52
53static int ghash_update(struct shash_desc *desc,
54 const u8 *src, unsigned int srclen)
55{
56 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
57 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
58 unsigned int n;
59 u8 *buf = dctx->buffer;
60 int ret;
61
62 if (dctx->bytes) {
63 u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
64
65 n = min(srclen, dctx->bytes);
66 dctx->bytes -= n;
67 srclen -= n;
68
69 memcpy(pos, src, n);
70 src += n;
71
72 if (!dctx->bytes) {
73 ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
74 GHASH_BLOCK_SIZE);
75 BUG_ON(ret != GHASH_BLOCK_SIZE);
76 }
77 }
78
79 n = srclen & ~(GHASH_BLOCK_SIZE - 1);
80 if (n) {
81 ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
82 BUG_ON(ret != n);
83 src += n;
84 srclen -= n;
85 }
86
87 if (srclen) {
88 dctx->bytes = GHASH_BLOCK_SIZE - srclen;
89 memcpy(buf, src, srclen);
90 }
91
92 return 0;
93}
94
95static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
96{
97 u8 *buf = dctx->buffer;
98 int ret;
99
100 if (dctx->bytes) {
101 u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
102
103 memset(pos, 0, dctx->bytes);
104
105 ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
106 BUG_ON(ret != GHASH_BLOCK_SIZE);
107 }
108
109 dctx->bytes = 0;
110}
111
112static int ghash_final(struct shash_desc *desc, u8 *dst)
113{
114 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
115 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
116
117 ghash_flush(ctx, dctx);
118 memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
119
120 return 0;
121}
122
123static struct shash_alg ghash_alg = {
124 .digestsize = GHASH_DIGEST_SIZE,
125 .init = ghash_init,
126 .update = ghash_update,
127 .final = ghash_final,
128 .setkey = ghash_setkey,
129 .descsize = sizeof(struct ghash_desc_ctx),
130 .base = {
131 .cra_name = "ghash",
132 .cra_driver_name = "ghash-s390",
133 .cra_priority = CRYPT_S390_PRIORITY,
134 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
135 .cra_blocksize = GHASH_BLOCK_SIZE,
136 .cra_ctxsize = sizeof(struct ghash_ctx),
137 .cra_module = THIS_MODULE,
138 .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
139 },
140};
141
142static int __init ghash_mod_init(void)
143{
144 if (!crypt_s390_func_available(KIMD_GHASH,
145 CRYPT_S390_MSA | CRYPT_S390_MSA4))
146 return -EOPNOTSUPP;
147
148 return crypto_register_shash(&ghash_alg);
149}
150
151static void __exit ghash_mod_exit(void)
152{
153 crypto_unregister_shash(&ghash_alg);
154}
155
156module_init(ghash_mod_init);
157module_exit(ghash_mod_exit);
158
159MODULE_ALIAS("ghash");
160
161MODULE_LICENSE("GPL");
162MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
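For reference, the transform computed here is the GHASH function of NIST SP
800-38D: with hash subkey H (ctx->key) and chaining value X (ctx->icv), each
full 16-byte block B_i updates the state as

    X_0 = 0^{128}, \qquad X_i = (X_{i-1} \oplus B_i) \cdot H
          \quad \text{in } \mathrm{GF}(2^{128})

ghash_update() hands only whole blocks to the KIMD instruction and buffers any
remainder; ghash_flush() zero-pads a trailing partial block for one final
iteration, and the digest is the last chaining value.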
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 8b16c479585b..0808fbf0f7d3 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -166,7 +166,7 @@ static int __init prng_init(void)
166 int ret; 166 int ret;
167 167
168 /* check if the CPU has a PRNG */ 168 /* check if the CPU has a PRNG */
169 if (!crypt_s390_func_available(KMC_PRNG)) 169 if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
170 return -EOPNOTSUPP; 170 return -EOPNOTSUPP;
171 171
172 if (prng_chunk_size < 8) 172 if (prng_chunk_size < 8)
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index f6de7826c979..e9868c6e0a08 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -90,7 +90,7 @@ static struct shash_alg alg = {
90 90
91static int __init sha1_s390_init(void) 91static int __init sha1_s390_init(void)
92{ 92{
93 if (!crypt_s390_func_available(KIMD_SHA_1)) 93 if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
94 return -EOPNOTSUPP; 94 return -EOPNOTSUPP;
95 return crypto_register_shash(&alg); 95 return crypto_register_shash(&alg);
96} 96}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 61a7db372121..5ed8d64fc2ed 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -86,7 +86,7 @@ static struct shash_alg alg = {
86 86
87static int sha256_s390_init(void) 87static int sha256_s390_init(void)
88{ 88{
89 if (!crypt_s390_func_available(KIMD_SHA_256)) 89 if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
90 return -EOPNOTSUPP; 90 return -EOPNOTSUPP;
91 91
92 return crypto_register_shash(&alg); 92 return crypto_register_shash(&alg);
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 4bf73d0dc525..32a81383b69c 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -132,7 +132,7 @@ static int __init init(void)
132{ 132{
133 int ret; 133 int ret;
134 134
135 if (!crypt_s390_func_available(KIMD_SHA_512)) 135 if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
136 return -EOPNOTSUPP; 136 return -EOPNOTSUPP;
137 if ((ret = crypto_register_shash(&sha512_alg)) < 0) 137 if ((ret = crypto_register_shash(&sha512_alg)) < 0)
138 goto out; 138 goto out;
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 1a58ad89fdf7..c04f1b7a9139 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -2,8 +2,6 @@
2# Arch-specific CryptoAPI modules. 2# Arch-specific CryptoAPI modules.
3# 3#
4 4
5obj-$(CONFIG_CRYPTO_FPU) += fpu.o
6
7obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o 5obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
8obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o 6obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
9obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o 7obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
@@ -24,6 +22,6 @@ aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
24twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o 22twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
25salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o 23salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
26 24
27aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o 25aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
28 26
29ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o 27ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 2577613fb32b..feee8ff1d05e 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -94,6 +94,10 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94 const u8 *in, unsigned int len, u8 *iv); 94 const u8 *in, unsigned int len, u8 *iv);
95asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, 95asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96 const u8 *in, unsigned int len, u8 *iv); 96 const u8 *in, unsigned int len, u8 *iv);
97
98int crypto_fpu_init(void);
99void crypto_fpu_exit(void);
100
97#ifdef CONFIG_X86_64 101#ifdef CONFIG_X86_64
98asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, 102asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
99 const u8 *in, unsigned int len, u8 *iv); 103 const u8 *in, unsigned int len, u8 *iv);
@@ -1257,6 +1261,8 @@ static int __init aesni_init(void)
1257 return -ENODEV; 1261 return -ENODEV;
1258 } 1262 }
1259 1263
1264 if ((err = crypto_fpu_init()))
1265 goto fpu_err;
1260 if ((err = crypto_register_alg(&aesni_alg))) 1266 if ((err = crypto_register_alg(&aesni_alg)))
1261 goto aes_err; 1267 goto aes_err;
1262 if ((err = crypto_register_alg(&__aesni_alg))) 1268 if ((err = crypto_register_alg(&__aesni_alg)))
@@ -1334,6 +1340,7 @@ blk_ecb_err:
1334__aes_err: 1340__aes_err:
1335 crypto_unregister_alg(&aesni_alg); 1341 crypto_unregister_alg(&aesni_alg);
1336aes_err: 1342aes_err:
1343fpu_err:
1337 return err; 1344 return err;
1338} 1345}
1339 1346
@@ -1363,6 +1370,8 @@ static void __exit aesni_exit(void)
1363 crypto_unregister_alg(&blk_ecb_alg); 1370 crypto_unregister_alg(&blk_ecb_alg);
1364 crypto_unregister_alg(&__aesni_alg); 1371 crypto_unregister_alg(&__aesni_alg);
1365 crypto_unregister_alg(&aesni_alg); 1372 crypto_unregister_alg(&aesni_alg);
1373
1374 crypto_fpu_exit();
1366} 1375}
1367 1376
1368module_init(aesni_init); 1377module_init(aesni_init);
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 1a8f8649c035..98d7a188f46b 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -150,18 +150,12 @@ static struct crypto_template crypto_fpu_tmpl = {
150 .module = THIS_MODULE, 150 .module = THIS_MODULE,
151}; 151};
152 152
153static int __init crypto_fpu_module_init(void) 153int __init crypto_fpu_init(void)
154{ 154{
155 return crypto_register_template(&crypto_fpu_tmpl); 155 return crypto_register_template(&crypto_fpu_tmpl);
156} 156}
157 157
158static void __exit crypto_fpu_module_exit(void) 158void __exit crypto_fpu_exit(void)
159{ 159{
160 crypto_unregister_template(&crypto_fpu_tmpl); 160 crypto_unregister_template(&crypto_fpu_tmpl);
161} 161}
162
163module_init(crypto_fpu_module_init);
164module_exit(crypto_fpu_module_exit);
165
166MODULE_LICENSE("GPL");
167MODULE_DESCRIPTION("FPU block cipher wrapper");
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 4b7cb0e691cd..87b22ca9c223 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -264,11 +264,6 @@ config CRYPTO_XTS
264 key size 256, 384 or 512 bits. This implementation currently 264 key size 256, 384 or 512 bits. This implementation currently
265 can't handle a sectorsize which is not a multiple of 16 bytes. 265 can't handle a sectorsize which is not a multiple of 16 bytes.
266 266
267config CRYPTO_FPU
268 tristate
269 select CRYPTO_BLKCIPHER
270 select CRYPTO_MANAGER
271
272comment "Hash modes" 267comment "Hash modes"
273 268
274config CRYPTO_HMAC 269config CRYPTO_HMAC
@@ -543,7 +538,6 @@ config CRYPTO_AES_NI_INTEL
543 select CRYPTO_AES_586 if !64BIT 538 select CRYPTO_AES_586 if !64BIT
544 select CRYPTO_CRYPTD 539 select CRYPTO_CRYPTD
545 select CRYPTO_ALGAPI 540 select CRYPTO_ALGAPI
546 select CRYPTO_FPU
547 help 541 help
548 Use Intel AES-NI instructions for AES algorithm. 542 Use Intel AES-NI instructions for AES algorithm.
549 543
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e912ea5def3d..2222617b3bed 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1009,6 +1009,10 @@ static int do_test(int m)
1009 speed_template_32_48_64); 1009 speed_template_32_48_64);
1010 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, 1010 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
1011 speed_template_32_48_64); 1011 speed_template_32_48_64);
1012 test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
1013 speed_template_16_24_32);
1014 test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
1015 speed_template_16_24_32);
1012 break; 1016 break;
1013 1017
1014 case 201: 1018 case 201:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 2854865f2434..b6b93d416351 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2219,6 +2219,22 @@ static const struct alg_test_desc alg_test_descs[] = {
2219 } 2219 }
2220 } 2220 }
2221 }, { 2221 }, {
2222 .alg = "ofb(aes)",
2223 .test = alg_test_skcipher,
2224 .fips_allowed = 1,
2225 .suite = {
2226 .cipher = {
2227 .enc = {
2228 .vecs = aes_ofb_enc_tv_template,
2229 .count = AES_OFB_ENC_TEST_VECTORS
2230 },
2231 .dec = {
2232 .vecs = aes_ofb_dec_tv_template,
2233 .count = AES_OFB_DEC_TEST_VECTORS
2234 }
2235 }
2236 }
2237 }, {
2222 .alg = "pcbc(fcrypt)", 2238 .alg = "pcbc(fcrypt)",
2223 .test = alg_test_skcipher, 2239 .test = alg_test_skcipher,
2224 .suite = { 2240 .suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index aa6dac05f843..27e60619538e 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -2980,6 +2980,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
2980#define AES_XTS_DEC_TEST_VECTORS 4 2980#define AES_XTS_DEC_TEST_VECTORS 4
2981#define AES_CTR_ENC_TEST_VECTORS 3 2981#define AES_CTR_ENC_TEST_VECTORS 3
2982#define AES_CTR_DEC_TEST_VECTORS 3 2982#define AES_CTR_DEC_TEST_VECTORS 3
2983#define AES_OFB_ENC_TEST_VECTORS 1
2984#define AES_OFB_DEC_TEST_VECTORS 1
2983#define AES_CTR_3686_ENC_TEST_VECTORS 7 2985#define AES_CTR_3686_ENC_TEST_VECTORS 7
2984#define AES_CTR_3686_DEC_TEST_VECTORS 6 2986#define AES_CTR_3686_DEC_TEST_VECTORS 6
2985#define AES_GCM_ENC_TEST_VECTORS 9 2987#define AES_GCM_ENC_TEST_VECTORS 9
@@ -5506,6 +5508,64 @@ static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5506 }, 5508 },
5507}; 5509};
5508 5510
5511static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5512 /* From NIST Special Publication 800-38A, Appendix F.5 */
5513 {
5514 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5515 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5516 .klen = 16,
 5517 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
 5518 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5519 .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
5520 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
5521 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
5522 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
5523 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
5524 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
5525 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
5526 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
5527 .ilen = 64,
5528 .result = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
5529 "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
5530 "\x77\x89\x50\x8d\x16\x91\x8f\x03\xf5"
5531 "\x3c\x52\xda\xc5\x4e\xd8\x25"
5532 "\x97\x40\x05\x1e\x9c\x5f\xec\xf6\x43"
5533 "\x44\xf7\xa8\x22\x60\xed\xcc"
5534 "\x30\x4c\x65\x28\xf6\x59\xc7\x78"
5535 "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
5536 .rlen = 64,
5537 }
5538};
5539
5540static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5541 /* From NIST Special Publication 800-38A, Appendix F.5 */
5542 {
5543 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5544 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5545 .klen = 16,
 5546 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
 5547 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5548 .input = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
5549 "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
5550 "\x77\x89\x50\x8d\x16\x91\x8f\x03\xf5"
5551 "\x3c\x52\xda\xc5\x4e\xd8\x25"
5552 "\x97\x40\x05\x1e\x9c\x5f\xec\xf6\x43"
5553 "\x44\xf7\xa8\x22\x60\xed\xcc"
5554 "\x30\x4c\x65\x28\xf6\x59\xc7\x78"
5555 "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
5556 .ilen = 64,
5557 .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
5558 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
5559 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
5560 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
5561 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
5562 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
5563 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
5564 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
5565 .rlen = 64,
5566 }
5567};
5568
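Both templates above are taken from NIST SP 800-38A, Appendix F.5, and they
mirror each other (input and result swapped) because OFB is a pure XOR stream
mode. The keystream depends only on the key and IV:

    O_1 = E_K(\mathrm{IV}), \qquad O_j = E_K(O_{j-1}), \qquad
    C_j = P_j \oplus O_j

and decryption applies the identical keystream, P_j = C_j \oplus O_j, which is
why the same vectors validate both directions.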
5509static struct aead_testvec aes_gcm_enc_tv_template[] = { 5569static struct aead_testvec aes_gcm_enc_tv_template[] = {
5510 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ 5570 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5511 .key = zeroed_string, 5571 .key = zeroed_string,
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index beecd1cf9b99..a60043b3e409 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -49,7 +49,7 @@ config HW_RANDOM_INTEL
49 49
50config HW_RANDOM_AMD 50config HW_RANDOM_AMD
51 tristate "AMD HW Random Number Generator support" 51 tristate "AMD HW Random Number Generator support"
52 depends on HW_RANDOM && X86 && PCI 52 depends on HW_RANDOM && (X86 || PPC_MAPLE) && PCI
53 default HW_RANDOM 53 default HW_RANDOM
54 ---help--- 54 ---help---
55 This driver provides kernel-side support for the Random Number 55 This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 0d8c5788b8e4..c6af038682f1 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -133,6 +133,12 @@ found:
133 pmbase &= 0x0000FF00; 133 pmbase &= 0x0000FF00;
134 if (pmbase == 0) 134 if (pmbase == 0)
135 goto out; 135 goto out;
136 if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
137 dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
138 pmbase + 0xF0);
139 err = -EBUSY;
140 goto out;
141 }
136 amd_rng.priv = (unsigned long)pmbase; 142 amd_rng.priv = (unsigned long)pmbase;
137 amd_pdev = pdev; 143 amd_pdev = pdev;
138 144
@@ -141,6 +147,7 @@ found:
141 if (err) { 147 if (err) {
142 printk(KERN_ERR PFX "RNG registering failed (%d)\n", 148 printk(KERN_ERR PFX "RNG registering failed (%d)\n",
143 err); 149 err);
150 release_region(pmbase + 0xF0, 8);
144 goto out; 151 goto out;
145 } 152 }
146out: 153out:
@@ -149,6 +156,8 @@ out:
149 156
150static void __exit mod_exit(void) 157static void __exit mod_exit(void)
151{ 158{
159 u32 pmbase = (unsigned long)amd_rng.priv;
160 release_region(pmbase + 0xF0, 8);
152 hwrng_unregister(&amd_rng); 161 hwrng_unregister(&amd_rng);
153} 162}
154 163
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e54185223c8c..c64c3807f516 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -91,6 +91,8 @@ config CRYPTO_SHA1_S390
91 This is the s390 hardware accelerated implementation of the 91 This is the s390 hardware accelerated implementation of the
92 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 92 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
93 93
94 It is available as of z990.
95
94config CRYPTO_SHA256_S390 96config CRYPTO_SHA256_S390
95 tristate "SHA256 digest algorithm" 97 tristate "SHA256 digest algorithm"
96 depends on S390 98 depends on S390
@@ -99,8 +101,7 @@ config CRYPTO_SHA256_S390
99 This is the s390 hardware accelerated implementation of the 101 This is the s390 hardware accelerated implementation of the
100 SHA256 secure hash standard (DFIPS 180-2). 102 SHA256 secure hash standard (DFIPS 180-2).
101 103
102 This version of SHA implements a 256 bit hash with 128 bits of 104 It is available as of z9.
103 security against collision attacks.
104 105
105config CRYPTO_SHA512_S390 106config CRYPTO_SHA512_S390
106 tristate "SHA384 and SHA512 digest algorithm" 107 tristate "SHA384 and SHA512 digest algorithm"
@@ -110,10 +111,7 @@ config CRYPTO_SHA512_S390
110 This is the s390 hardware accelerated implementation of the 111 This is the s390 hardware accelerated implementation of the
111 SHA512 secure hash standard. 112 SHA512 secure hash standard.
112 113
113 This version of SHA implements a 512 bit hash with 256 bits of 114 It is available as of z10.
114 security against collision attacks. The code also includes SHA-384,
115 a 384 bit hash with 192 bits of security against collision attacks.
116
117 115
118config CRYPTO_DES_S390 116config CRYPTO_DES_S390
119 tristate "DES and Triple DES cipher algorithms" 117 tristate "DES and Triple DES cipher algorithms"
@@ -121,9 +119,12 @@ config CRYPTO_DES_S390
121 select CRYPTO_ALGAPI 119 select CRYPTO_ALGAPI
122 select CRYPTO_BLKCIPHER 120 select CRYPTO_BLKCIPHER
123 help 121 help
124 This us the s390 hardware accelerated implementation of the 122 This is the s390 hardware accelerated implementation of the
125 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 123 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
126 124
125 As of z990 the ECB and CBC mode are hardware accelerated.
126 As of z196 the CTR mode is hardware accelerated.
127
127config CRYPTO_AES_S390 128config CRYPTO_AES_S390
128 tristate "AES cipher algorithms" 129 tristate "AES cipher algorithms"
129 depends on S390 130 depends on S390
@@ -131,20 +132,15 @@ config CRYPTO_AES_S390
131 select CRYPTO_BLKCIPHER 132 select CRYPTO_BLKCIPHER
132 help 133 help
133 This is the s390 hardware accelerated implementation of the 134 This is the s390 hardware accelerated implementation of the
134 AES cipher algorithms (FIPS-197). AES uses the Rijndael 135 AES cipher algorithms (FIPS-197).
135 algorithm.
136
137 Rijndael appears to be consistently a very good performer in
138 both hardware and software across a wide range of computing
139 environments regardless of its use in feedback or non-feedback
140 modes. Its key setup time is excellent, and its key agility is
141 good. Rijndael's very low memory requirements make it very well
142 suited for restricted-space environments, in which it also
143 demonstrates excellent performance. Rijndael's operations are
144 among the easiest to defend against power and timing attacks.
145 136
146 On s390 the System z9-109 currently only supports the key size 137 As of z9 the ECB and CBC modes are hardware accelerated
147 of 128 bit. 138 for 128 bit keys.
139 As of z10 the ECB and CBC modes are hardware accelerated
140 for all AES key sizes.
141 As of z196 the CTR mode is hardware accelerated for all AES
142 key sizes and XTS mode is hardware accelerated for 256 and
143 512 bit keys.
148 144
149config S390_PRNG 145config S390_PRNG
150 tristate "Pseudo random number generator device driver" 146 tristate "Pseudo random number generator device driver"
@@ -154,8 +150,20 @@ config S390_PRNG
154 Select this option if you want to use the s390 pseudo random number 150 Select this option if you want to use the s390 pseudo random number
155 generator. The PRNG is part of the cryptographic processor functions 151 generator. The PRNG is part of the cryptographic processor functions
156 and uses triple-DES to generate secure random numbers like the 152 and uses triple-DES to generate secure random numbers like the
157 ANSI X9.17 standard. The PRNG is usable via the char device 153 ANSI X9.17 standard. User-space programs access the
158 /dev/prandom. 154 pseudo-random-number device through the char device /dev/prandom.
155
156 It is available as of z9.
157
158config CRYPTO_GHASH_S390
159 tristate "GHASH digest algorithm"
160 depends on S390
161 select CRYPTO_HASH
162 help
163 This is the s390 hardware accelerated implementation of the
164 GHASH message digest algorithm for GCM (Galois/Counter Mode).
165
166 It is available as of z196.
159 167
160config CRYPTO_DEV_MV_CESA 168config CRYPTO_DEV_MV_CESA
161 tristate "Marvell's Cryptographic Engine" 169 tristate "Marvell's Cryptographic Engine"
@@ -200,6 +208,8 @@ config CRYPTO_DEV_HIFN_795X_RNG
200 Select this option if you want to enable the random number generator 208 Select this option if you want to enable the random number generator
201 on the HIFN 795x crypto adapters. 209 on the HIFN 795x crypto adapters.
202 210
211source drivers/crypto/caam/Kconfig
212
203config CRYPTO_DEV_TALITOS 213config CRYPTO_DEV_TALITOS
204 tristate "Talitos Freescale Security Engine (SEC)" 214 tristate "Talitos Freescale Security Engine (SEC)"
205 select CRYPTO_ALGAPI 215 select CRYPTO_ALGAPI
@@ -269,4 +279,15 @@ config CRYPTO_DEV_PICOXCELL
269 279
270 Saying m here will build a module named pipcoxcell_crypto. 280 Saying m here will build a module named pipcoxcell_crypto.
271 281
282config CRYPTO_DEV_S5P
283 tristate "Support for Samsung S5PV210 crypto accelerator"
284 depends on ARCH_S5PV210
285 select CRYPTO_AES
286 select CRYPTO_ALGAPI
287 select CRYPTO_BLKCIPHER
288 help
289 This option allows you to have support for S5P crypto acceleration.
 290 Select this to offload execution of the AES algorithms
 291 onto the Samsung S5PV210 or S5PC110 crypto engine.
292
272endif # CRYPTO_HW 293endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 5203e34248d7..53ea50155319 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,8 +6,10 @@ n2_crypto-y := n2_core.o n2_asm.o
6obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 6obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
7obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o 7obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
8obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 8obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
9obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 10obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
10obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ 11obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
11obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o 12obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
12obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o 13obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
13obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o 14obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
15obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644
index 000000000000..2d876bb98ff4
--- /dev/null
+++ b/drivers/crypto/caam/Kconfig
@@ -0,0 +1,72 @@
1config CRYPTO_DEV_FSL_CAAM
2 tristate "Freescale CAAM-Multicore driver backend"
3 depends on FSL_SOC
4 help
5 Enables the driver module for Freescale's Cryptographic Accelerator
6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
7 This module adds a job ring operation interface, and configures h/w
8 to operate as a DPAA component automatically, depending
9 on h/w feature availability.
10
11 To compile this driver as a module, choose M here: the module
12 will be called caam.
13
14config CRYPTO_DEV_FSL_CAAM_RINGSIZE
15 int "Job Ring size"
16 depends on CRYPTO_DEV_FSL_CAAM
17 range 2 9
18 default "9"
19 help
20 Select size of Job Rings as a power of 2, within the
21 range 2-9 (ring size 4-512).
22 Examples:
23 2 => 4
24 3 => 8
25 4 => 16
26 5 => 32
27 6 => 64
28 7 => 128
29 8 => 256
30 9 => 512
31
32config CRYPTO_DEV_FSL_CAAM_INTC
33 bool "Job Ring interrupt coalescing"
34 depends on CRYPTO_DEV_FSL_CAAM
35 default y
36 help
37 Enable the Job Ring's interrupt coalescing feature.
38
39config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
40 int "Job Ring interrupt coalescing count threshold"
41 depends on CRYPTO_DEV_FSL_CAAM_INTC
42 range 1 255
43 default 255
44 help
45 Select number of descriptor completions to queue before
46 raising an interrupt, in the range 1-255. Note that a selection
47 of 1 functionally defeats the coalescing feature, and a selection
48 equal or greater than the job ring size will force timeouts.
49
50config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
51 int "Job Ring interrupt coalescing timer threshold"
52 depends on CRYPTO_DEV_FSL_CAAM_INTC
53 range 1 65535
54 default 2048
55 help
56 Select number of bus clocks/64 to timeout in the case that one or
57 more descriptor completions are queued without reaching the count
58 threshold. Range is 1-65535.
59
60config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
61 tristate "Register algorithm implementations with the Crypto API"
62 depends on CRYPTO_DEV_FSL_CAAM
63 default y
64 select CRYPTO_ALGAPI
65 select CRYPTO_AUTHENC
66 help
67 Selecting this will offload crypto for users of the
68 scatterlist crypto API (such as the linux native IPSec
69 stack) to the SEC4 via job ring.
70
71 To compile this as a module, choose M here: the module
72 will be called caamalg.
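The RINGSIZE value is an exponent rather than an entry count; a driver
consuming it would size the ring as in this one-line sketch (JOBR_DEPTH is an
assumed name, not taken from this patch):

    /* Kconfig range 2..9 maps to 4..512 job ring entries. */
    #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)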
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644
index 000000000000..ef39011b4505
--- /dev/null
+++ b/drivers/crypto/caam/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the CAAM backend and dependent components
3#
4
5obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
6obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7
8caam-objs := ctrl.o jr.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644
index 000000000000..d0e65d6ddc77
--- /dev/null
+++ b/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,1268 @@
1/*
2 * caam - Freescale FSL CAAM support for crypto API
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Based on talitos crypto API driver.
7 *
8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9 *
10 * --------------- ---------------
11 * | JobDesc #1 |-------------------->| ShareDesc |
12 * | *(packet 1) | | (PDB) |
13 * --------------- |------------->| (hashKey) |
14 * . | | (cipherKey) |
15 * . | |-------->| (operation) |
16 * --------------- | | ---------------
17 * | JobDesc #2 |------| |
18 * | *(packet 2) | |
19 * --------------- |
20 * . |
21 * . |
22 * --------------- |
23 * | JobDesc #3 |------------
24 * | *(packet 3) |
25 * ---------------
26 *
27 * The SharedDesc never changes for a connection unless rekeyed, but
28 * each packet will likely be in a different place. So all we need
29 * to know to process the packet is where the input is, where the
30 * output goes, and what context we want to process with. Context is
31 * in the SharedDesc, packet references in the JobDesc.
32 *
33 * So, a job desc looks like:
34 *
35 * ---------------------
36 * | Header |
37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR |
39 * | (output buffer) |
40 * | SEQ_IN_PTR |
41 * | (input buffer) |
42 * | LOAD (to DECO) |
43 * ---------------------
44 */
45
46#include "compat.h"
47
48#include "regs.h"
49#include "intern.h"
50#include "desc_constr.h"
51#include "jr.h"
52#include "error.h"
53
54/*
55 * crypto alg
56 */
57#define CAAM_CRA_PRIORITY 3000
58/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
59#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
60 SHA512_DIGEST_SIZE * 2)
61/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
62#define CAAM_MAX_IV_LENGTH 16
63
64/* length of descriptors text */
65#define DESC_AEAD_SHARED_TEXT_LEN 4
66#define DESC_AEAD_ENCRYPT_TEXT_LEN 21
67#define DESC_AEAD_DECRYPT_TEXT_LEN 24
68#define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27
69
70#ifdef DEBUG
71/* for print_hex_dumps with line references */
72#define xstr(s) str(s)
73#define str(s) #s
74#define debug(format, arg...) printk(format, arg)
75#else
76#define debug(format, arg...)
77#endif
78
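The two-level xstr()/str() pair is the standard trick for stringifying a
macro's expansion instead of its name: the # operator applies before argument
expansion, so a single level would produce the literal token. A tiny
demonstration:

    #include <stdio.h>

    #define str(s) #s
    #define xstr(s) str(s)

    int main(void)
    {
            /* str(__LINE__) would print "__LINE__"; the extra level
             * lets __LINE__ expand to its number first. */
            printf("line %s\n", xstr(__LINE__));
            return 0;
    }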
79/*
80 * per-session context
81 */
82struct caam_ctx {
83 struct device *jrdev;
84 u32 *sh_desc;
85 dma_addr_t shared_desc_phys;
86 u32 class1_alg_type;
87 u32 class2_alg_type;
88 u32 alg_op;
89 u8 *key;
90 dma_addr_t key_phys;
91 unsigned int enckeylen;
92 unsigned int split_key_len;
93 unsigned int split_key_pad_len;
94 unsigned int authsize;
95};
96
97static int aead_authenc_setauthsize(struct crypto_aead *authenc,
98 unsigned int authsize)
99{
100 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
101
102 ctx->authsize = authsize;
103
104 return 0;
105}
106
107struct split_key_result {
108 struct completion completion;
109 int err;
110};
111
112static void split_key_done(struct device *dev, u32 *desc, u32 err,
113 void *context)
114{
115 struct split_key_result *res = context;
116
117#ifdef DEBUG
118 dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
119#endif
120 if (err) {
121 char tmp[CAAM_ERROR_STR_MAX];
122
123 dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
124 }
125
126 res->err = err;
127
128 complete(&res->completion);
129}
130
131/*
132get a split ipad/opad key
133
134Split key generation-----------------------------------------------
135
136[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
137[01] 0x04000014 key: class2->keyreg len=20
138 @0xffe01000
139[03] 0x84410014 operation: cls2-op sha1 hmac init dec
140[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
141[05] 0xa4000001 jump: class2 local all ->1 [06]
142[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
143 @0xffe04000
144*/
145static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
146{
147 struct device *jrdev = ctx->jrdev;
148 u32 *desc;
149 struct split_key_result result;
150 dma_addr_t dma_addr_in, dma_addr_out;
151 int ret = 0;
152
153 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
154
155 init_job_desc(desc, 0);
156
157 dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
158 DMA_TO_DEVICE);
159 if (dma_mapping_error(jrdev, dma_addr_in)) {
160 dev_err(jrdev, "unable to map key input memory\n");
161 kfree(desc);
162 return -ENOMEM;
163 }
164 append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
165 KEY_DEST_CLASS_REG);
166
167 /* Sets MDHA up into an HMAC-INIT */
168 append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
169 OP_ALG_AS_INIT);
170
171 /*
172 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
 173 * into both pads inside MDHA
174 */
175 append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
176 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
177
178 /*
179 * FIFO_STORE with the explicit split-key content store
180 * (0x26 output type)
181 */
182 dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
183 DMA_FROM_DEVICE);
184 if (dma_mapping_error(jrdev, dma_addr_out)) {
185 dev_err(jrdev, "unable to map key output memory\n");
186 kfree(desc);
187 return -ENOMEM;
188 }
189 append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
190 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
191
192#ifdef DEBUG
193 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
194 DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
195 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
196 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
197#endif
198
199 result.err = 0;
200 init_completion(&result.completion);
201
202 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
203 if (!ret) {
204 /* in progress */
205 wait_for_completion_interruptible(&result.completion);
206 ret = result.err;
207#ifdef DEBUG
208 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
209 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
210 ctx->split_key_pad_len, 1);
211#endif
212 }
213
214 dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
215 DMA_FROM_DEVICE);
216 dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
217
218 kfree(desc);
219
220 return ret;
221}
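The "split key" generated here is the standard HMAC precomputation. Since

    \mathrm{HMAC}(K, m) =
        H\bigl((K \oplus \mathrm{opad}) \parallel
               H((K \oplus \mathrm{ipad}) \parallel m)\bigr)

the MDHA hashes K xor ipad and K xor opad once, stores the two intermediate
chaining states (the two digest-sized halves of the split key), and every
later packet resumes from those states instead of rehashing the key itself.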
222
223static int build_sh_desc_ipsec(struct caam_ctx *ctx)
224{
225 struct device *jrdev = ctx->jrdev;
226 u32 *sh_desc;
227 u32 *jump_cmd;
228 bool keys_fit_inline = 0;
229
230 /*
231 * largest Job Descriptor and its Shared Descriptor
232 * must both fit into the 64-word Descriptor h/w Buffer
233 */
234 if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
235 DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
236 ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
237 keys_fit_inline = 1;
238
239 /* build shared descriptor for this session */
240 sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
241 keys_fit_inline ?
242 ctx->split_key_pad_len + ctx->enckeylen :
243 CAAM_PTR_SZ * 2, GFP_DMA | GFP_KERNEL);
244 if (!sh_desc) {
245 dev_err(jrdev, "could not allocate shared descriptor\n");
246 return -ENOMEM;
247 }
248
249 init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
250
251 jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
252 JUMP_COND_SHRD | JUMP_COND_SELF);
253
254 /*
255 * process keys, starting with class 2/authentication.
256 */
257 if (keys_fit_inline) {
258 append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
259 ctx->split_key_len,
260 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
261
262 append_key_as_imm(sh_desc, (void *)ctx->key +
263 ctx->split_key_pad_len, ctx->enckeylen,
264 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
265 } else {
266 append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
267 KEY_DEST_MDHA_SPLIT | KEY_ENC);
268 append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
269 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
270 }
271
272 /* update jump cmd now that we are at the jump target */
273 set_jump_tgt_here(sh_desc, jump_cmd);
274
275 ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
276 desc_bytes(sh_desc),
277 DMA_TO_DEVICE);
278 if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
279 dev_err(jrdev, "unable to map shared descriptor\n");
280 kfree(sh_desc);
281 return -ENOMEM;
282 }
283
284 ctx->sh_desc = sh_desc;
285
286 return 0;
287}
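The keys_fit_inline test is straightforward arithmetic. Assuming CAAM_CMD_SZ
is the 4-byte command word and CAAM_DESC_BYTES_MAX the 64-word descriptor
buffer (256 bytes), the worst-case command text for this session is

    (27 + 4) * 4 = 124 bytes,   256 - 124 = 132 bytes

so a split key plus cipher key totalling up to 132 bytes can be embedded in
the shared descriptor; otherwise only two key pointers (CAAM_PTR_SZ each) are
reserved and the keys are referenced by address.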
288
289static int aead_authenc_setkey(struct crypto_aead *aead,
290 const u8 *key, unsigned int keylen)
291{
292 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
293 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
294 struct caam_ctx *ctx = crypto_aead_ctx(aead);
295 struct device *jrdev = ctx->jrdev;
296 struct rtattr *rta = (void *)key;
297 struct crypto_authenc_key_param *param;
298 unsigned int authkeylen;
299 unsigned int enckeylen;
300 int ret = 0;
301
302 param = RTA_DATA(rta);
303 enckeylen = be32_to_cpu(param->enckeylen);
304
305 key += RTA_ALIGN(rta->rta_len);
306 keylen -= RTA_ALIGN(rta->rta_len);
307
308 if (keylen < enckeylen)
309 goto badkey;
310
311 authkeylen = keylen - enckeylen;
312
313 if (keylen > CAAM_MAX_KEY_SIZE)
314 goto badkey;
315
316 /* Pick class 2 key length from algorithm submask */
317 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
318 OP_ALG_ALGSEL_SHIFT] * 2;
319 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
320
321#ifdef DEBUG
322 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
323 keylen, enckeylen, authkeylen);
324 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
325 ctx->split_key_len, ctx->split_key_pad_len);
326 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
327 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
328#endif
329 ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
330 GFP_KERNEL | GFP_DMA);
331 if (!ctx->key) {
332 dev_err(jrdev, "could not allocate key output memory\n");
333 return -ENOMEM;
334 }
335
336 ret = gen_split_key(ctx, key, authkeylen);
337 if (ret) {
338 kfree(ctx->key);
339 goto badkey;
340 }
341
342 /* postpend encryption key to auth split key */
343 memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
344
345 ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
346 enckeylen, DMA_TO_DEVICE);
347 if (dma_mapping_error(jrdev, ctx->key_phys)) {
348 dev_err(jrdev, "unable to map key i/o memory\n");
349 kfree(ctx->key);
350 return -ENOMEM;
351 }
352#ifdef DEBUG
353 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
354 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
355 ctx->split_key_pad_len + enckeylen, 1);
356#endif
357
358 ctx->enckeylen = enckeylen;
359
360 ret = build_sh_desc_ipsec(ctx);
361 if (ret) {
362 dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
363 enckeylen, DMA_TO_DEVICE);
364 kfree(ctx->key);
365 }
366
367 return ret;
368badkey:
369 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
370 return -EINVAL;
371}
372
373struct link_tbl_entry {
374 u64 ptr;
375 u32 len;
376 u8 reserved;
377 u8 buf_pool_id;
378 u16 offset;
379};
380
381/*
382 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
383 * @src_nents: number of segments in input scatterlist
384 * @dst_nents: number of segments in output scatterlist
385 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
386 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
387 * @link_tbl_bytes: length of dma mapped link_tbl space
388 * @link_tbl_dma: bus physical mapped address of h/w link table
389 * @hw_desc: the h/w job descriptor followed by any referenced link tables
390 */
391struct ipsec_esp_edesc {
392 int assoc_nents;
393 int src_nents;
394 int dst_nents;
395 int link_tbl_bytes;
396 dma_addr_t link_tbl_dma;
397 struct link_tbl_entry *link_tbl;
398 u32 hw_desc[0];
399};
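hw_desc[0] is the pre-C99 spelling of a flexible array member: the job
descriptor, followed by any link tables, lives in the same allocation as the
bookkeeping header, which is why the completion callbacks recover the edesc
from the descriptor pointer with offsetof() arithmetic. A minimal sketch of
the idiom (names hypothetical):

    #include <stdlib.h>

    struct edesc_like {
            int src_nents;          /* fixed bookkeeping fields... */
            unsigned int hw_desc[]; /* variable-length tail (C99 form) */
    };

    static struct edesc_like *alloc_edesc(size_t desc_words)
    {
            /* one allocation covers the header and descriptor tail */
            return malloc(sizeof(struct edesc_like) +
                          desc_words * sizeof(unsigned int));
    }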
400
401static void ipsec_esp_unmap(struct device *dev,
402 struct ipsec_esp_edesc *edesc,
403 struct aead_request *areq)
404{
405 dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
406
407 if (unlikely(areq->dst != areq->src)) {
408 dma_unmap_sg(dev, areq->src, edesc->src_nents,
409 DMA_TO_DEVICE);
410 dma_unmap_sg(dev, areq->dst, edesc->dst_nents,
411 DMA_FROM_DEVICE);
412 } else {
413 dma_unmap_sg(dev, areq->src, edesc->src_nents,
414 DMA_BIDIRECTIONAL);
415 }
416
417 if (edesc->link_tbl_bytes)
418 dma_unmap_single(dev, edesc->link_tbl_dma,
419 edesc->link_tbl_bytes,
420 DMA_TO_DEVICE);
421}
422
423/*
424 * ipsec_esp descriptor callbacks
425 */
426static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
427 void *context)
428{
429 struct aead_request *areq = context;
430 struct ipsec_esp_edesc *edesc;
431#ifdef DEBUG
432 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
433 int ivsize = crypto_aead_ivsize(aead);
434 struct caam_ctx *ctx = crypto_aead_ctx(aead);
435
436 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
437#endif
438 edesc = (struct ipsec_esp_edesc *)((char *)desc -
439 offsetof(struct ipsec_esp_edesc, hw_desc));
440
441 if (err) {
442 char tmp[CAAM_ERROR_STR_MAX];
443
444 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
445 }
446
447 ipsec_esp_unmap(jrdev, edesc, areq);
448
449#ifdef DEBUG
450 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
451 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
452 areq->assoclen , 1);
453 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
454 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
455 edesc->src_nents ? 100 : ivsize, 1);
456 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
457 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
458 edesc->src_nents ? 100 : areq->cryptlen +
459 ctx->authsize + 4, 1);
460#endif
461
462 kfree(edesc);
463
464 aead_request_complete(areq, err);
465}
466
467static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
468 void *context)
469{
470 struct aead_request *areq = context;
471 struct ipsec_esp_edesc *edesc;
472#ifdef DEBUG
473 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
474 struct caam_ctx *ctx = crypto_aead_ctx(aead);
475
476 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
477#endif
478 edesc = (struct ipsec_esp_edesc *)((char *)desc -
479 offsetof(struct ipsec_esp_edesc, hw_desc));
480
481 if (err) {
482 char tmp[CAAM_ERROR_STR_MAX];
483
484 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
485 }
486
487 ipsec_esp_unmap(jrdev, edesc, areq);
488
489 /*
490 * verify hw auth check passed else return -EBADMSG
491 */
492 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
493 err = -EBADMSG;
494
495#ifdef DEBUG
496 print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
497 DUMP_PREFIX_ADDRESS, 16, 4,
498 ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
499 sizeof(struct iphdr) + areq->assoclen +
500 ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
501 ctx->authsize + 36, 1);
502 if (!err && edesc->link_tbl_bytes) {
503 struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
504 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
505 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
506 sg->length + ctx->authsize + 16, 1);
507 }
508#endif
509 kfree(edesc);
510
511 aead_request_complete(areq, err);
512}
513
514/*
515 * convert scatterlist to h/w link table format
516 * scatterlist must have been previously dma mapped
517 */
518static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
519 struct link_tbl_entry *link_tbl_ptr, u32 offset)
520{
521 while (sg_count) {
522 link_tbl_ptr->ptr = sg_dma_address(sg);
523 link_tbl_ptr->len = sg_dma_len(sg);
524 link_tbl_ptr->reserved = 0;
525 link_tbl_ptr->buf_pool_id = 0;
526 link_tbl_ptr->offset = offset;
527 link_tbl_ptr++;
528 sg = sg_next(sg);
529 sg_count--;
530 }
531
532 /* set Final bit (marks end of link table) */
533 link_tbl_ptr--;
534 link_tbl_ptr->len |= 0x40000000;
535}
536
537/*
538 * fill in and submit ipsec_esp job descriptor
539 */
540static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
541 u32 encrypt,
542 void (*callback) (struct device *dev, u32 *desc,
543 u32 err, void *context))
544{
545 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
546 struct caam_ctx *ctx = crypto_aead_ctx(aead);
547 struct device *jrdev = ctx->jrdev;
548 u32 *desc = edesc->hw_desc, options;
549 int ret, sg_count, assoc_sg_count;
550 int ivsize = crypto_aead_ivsize(aead);
551 int authsize = ctx->authsize;
552 dma_addr_t ptr, dst_dma, src_dma;
553#ifdef DEBUG
554 u32 *sh_desc = ctx->sh_desc;
555
556 debug("assoclen %d cryptlen %d authsize %d\n",
557 areq->assoclen, areq->cryptlen, authsize);
558 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
559 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
560		       areq->assoclen, 1);
561 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
562 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
563 edesc->src_nents ? 100 : ivsize, 1);
564 print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
565 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
566 edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
567 print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
568 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
569 desc_bytes(sh_desc), 1);
570#endif
571 assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
572 DMA_TO_DEVICE);
573 if (areq->src == areq->dst)
574 sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
575 DMA_BIDIRECTIONAL);
576 else
577 sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
578 DMA_TO_DEVICE);
579
580 /* start auth operation */
581 append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
582 (encrypt ? : OP_ALG_ICV_ON));
583
584 /* Load FIFO with data for Class 2 CHA */
585 options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
586 if (!edesc->assoc_nents) {
587 ptr = sg_dma_address(areq->assoc);
588 } else {
589 sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
590 edesc->link_tbl, 0);
591 ptr = edesc->link_tbl_dma;
592 options |= LDST_SGF;
593 }
594 append_fifo_load(desc, ptr, areq->assoclen, options);
595
596 /* copy iv from cipher/class1 input context to class2 infifo */
597 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
598
599 if (!encrypt) {
600 u32 *jump_cmd, *uncond_jump_cmd;
601
602 /* JUMP if shared */
603 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
604
605 /* start class 1 (cipher) operation, non-shared version */
606 append_operation(desc, ctx->class1_alg_type |
607 OP_ALG_AS_INITFINAL);
608
609 uncond_jump_cmd = append_jump(desc, 0);
610
611 set_jump_tgt_here(desc, jump_cmd);
612
613 /* start class 1 (cipher) operation, shared version */
614 append_operation(desc, ctx->class1_alg_type |
615 OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
616 set_jump_tgt_here(desc, uncond_jump_cmd);
617 } else
618 append_operation(desc, ctx->class1_alg_type |
619 OP_ALG_AS_INITFINAL | encrypt);
620
621	/* load payload & instruct class 2 to snoop class 1 if encrypting */
622 options = 0;
623 if (!edesc->src_nents) {
624 src_dma = sg_dma_address(areq->src);
625 } else {
626 sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
627 edesc->assoc_nents, 0);
628 src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
629 sizeof(struct link_tbl_entry);
630 options |= LDST_SGF;
631 }
632 append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
633 append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
634 FIFOLD_TYPE_LASTBOTH |
635 (encrypt ? FIFOLD_TYPE_MSG1OUT2
636 : FIFOLD_TYPE_MSG));
637
638 /* specify destination */
639 if (areq->src == areq->dst) {
640 dst_dma = src_dma;
641 } else {
642 sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
643 DMA_FROM_DEVICE);
644 if (!edesc->dst_nents) {
645 dst_dma = sg_dma_address(areq->dst);
646 options = 0;
647 } else {
648 sg_to_link_tbl(areq->dst, edesc->dst_nents,
649 edesc->link_tbl + edesc->assoc_nents +
650 edesc->src_nents, 0);
651 dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
652 edesc->src_nents) *
653 sizeof(struct link_tbl_entry);
654 options = LDST_SGF;
655 }
656 }
657 append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
658 append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
659
660 /* ICV */
661 if (encrypt)
662 append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
663 LDST_SRCDST_BYTE_CONTEXT);
664 else
665 append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
666 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
667
668#ifdef DEBUG
669 debug("job_desc_len %d\n", desc_len(desc));
670 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
671		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
672 print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
673 DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
674 edesc->link_tbl_bytes, 1);
675#endif
676
677 ret = caam_jr_enqueue(jrdev, desc, callback, areq);
678 if (!ret)
679 ret = -EINPROGRESS;
680 else {
681 ipsec_esp_unmap(jrdev, edesc, areq);
682 kfree(edesc);
683 }
684
685 return ret;
686}
687
688/*
689 * derive number of elements in scatterlist
690 */
691static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
692{
693 struct scatterlist *sg = sg_list;
694 int sg_nents = 0;
695
696 *chained = 0;
697 while (nbytes > 0) {
698 sg_nents++;
699 nbytes -= sg->length;
700 if (!sg_is_last(sg) && (sg + 1)->length == 0)
701 *chained = 1;
702 sg = scatterwalk_sg_next(sg);
703 }
704
705 return sg_nents;
706}
707
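/*
 * Worked example (illustrative): for a 3000-byte request spread across
 * scatterlist entries of 2048 and 1024 bytes, the loop visits two
 * entries before nbytes reaches <= 0, so sg_count() returns 2;
 * *chained is set only if a zero-length stub chains to another list.
 */
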
708/*
709 * allocate and map the ipsec_esp extended descriptor
710 */
711static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
712 int desc_bytes)
713{
714 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
715 struct caam_ctx *ctx = crypto_aead_ctx(aead);
716 struct device *jrdev = ctx->jrdev;
717 gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
718 GFP_ATOMIC;
719 int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
720 struct ipsec_esp_edesc *edesc;
721
722 assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
723 BUG_ON(chained);
724 if (likely(assoc_nents == 1))
725 assoc_nents = 0;
726
727 src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
728 &chained);
729 BUG_ON(chained);
730 if (src_nents == 1)
731 src_nents = 0;
732
733 if (unlikely(areq->dst != areq->src)) {
734 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
735 &chained);
736 BUG_ON(chained);
737 if (dst_nents == 1)
738 dst_nents = 0;
739 }
740
741 link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
742 sizeof(struct link_tbl_entry);
743 debug("link_tbl_bytes %d\n", link_tbl_bytes);
744
745 /* allocate space for base edesc and hw desc commands, link tables */
746 edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
747 link_tbl_bytes, GFP_DMA | flags);
748 if (!edesc) {
749 dev_err(jrdev, "could not allocate extended descriptor\n");
750 return ERR_PTR(-ENOMEM);
751 }
752
753 edesc->assoc_nents = assoc_nents;
754 edesc->src_nents = src_nents;
755 edesc->dst_nents = dst_nents;
756 edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
757 desc_bytes;
758 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
759 link_tbl_bytes, DMA_TO_DEVICE);
760 edesc->link_tbl_bytes = link_tbl_bytes;
761
762 return edesc;
763}
764
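/*
 * Resulting layout (sketch): a single contiguous GFP_DMA allocation
 * holds the bookkeeping struct (in whose hw_desc[] tail the job
 * descriptor is built), followed by the link table:
 *
 *	[ struct ipsec_esp_edesc | hw_desc (desc_bytes) | link table ]
 *	                                                  ^ link_tbl,
 *	                                                    DMA-mapped
 */
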
765static int aead_authenc_encrypt(struct aead_request *areq)
766{
767 struct ipsec_esp_edesc *edesc;
768 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
769 struct caam_ctx *ctx = crypto_aead_ctx(aead);
770 struct device *jrdev = ctx->jrdev;
771 int ivsize = crypto_aead_ivsize(aead);
772 u32 *desc;
773 dma_addr_t iv_dma;
774
775 /* allocate extended descriptor */
776 edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN *
777 CAAM_CMD_SZ);
778 if (IS_ERR(edesc))
779 return PTR_ERR(edesc);
780
781 desc = edesc->hw_desc;
782
783 /* insert shared descriptor pointer */
784 init_job_desc_shared(desc, ctx->shared_desc_phys,
785 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
786
787 iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
788 /* check dma error */
789
790 append_load(desc, iv_dma, ivsize,
791 LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
792
793 return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
794}
795
796static int aead_authenc_decrypt(struct aead_request *req)
797{
798 struct crypto_aead *aead = crypto_aead_reqtfm(req);
799 int ivsize = crypto_aead_ivsize(aead);
800 struct caam_ctx *ctx = crypto_aead_ctx(aead);
801 struct device *jrdev = ctx->jrdev;
802 struct ipsec_esp_edesc *edesc;
803 u32 *desc;
804 dma_addr_t iv_dma;
805
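	/* on decrypt, cryptlen includes the appended ICV; count payload only */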
806 req->cryptlen -= ctx->authsize;
807
808 /* allocate extended descriptor */
809 edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
810 CAAM_CMD_SZ);
811 if (IS_ERR(edesc))
812 return PTR_ERR(edesc);
813
814 desc = edesc->hw_desc;
815
816 /* insert shared descriptor pointer */
817 init_job_desc_shared(desc, ctx->shared_desc_phys,
818 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
819
820 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
821 /* check dma error */
822
823 append_load(desc, iv_dma, ivsize,
824 LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
825
826 return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
827}
828
829static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
830{
831 struct aead_request *areq = &req->areq;
832 struct ipsec_esp_edesc *edesc;
833 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
834 struct caam_ctx *ctx = crypto_aead_ctx(aead);
835 struct device *jrdev = ctx->jrdev;
836 int ivsize = crypto_aead_ivsize(aead);
837 dma_addr_t iv_dma;
838 u32 *desc;
839
840 iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);
841
842 debug("%s: giv %p\n", __func__, req->giv);
843
844 /* allocate extended descriptor */
845 edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
846 CAAM_CMD_SZ);
847 if (IS_ERR(edesc))
848 return PTR_ERR(edesc);
849
850 desc = edesc->hw_desc;
851
852 /* insert shared descriptor pointer */
853 init_job_desc_shared(desc, ctx->shared_desc_phys,
854 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
855
856 /*
857 * LOAD IMM Info FIFO
858 * to DECO, Last, Padding, Random, Message, 16 bytes
859 */
860 append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
861 NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
862 NFIFOENTRY_PTYPE_RND | ivsize,
863 LDST_SRCDST_WORD_INFO_FIFO);
864
865 /*
866 * disable info fifo entries since the above serves as the entry
867 * this way, the MOVE command won't generate an entry.
868 * Note that this isn't required in more recent versions of
869 * SEC as a MOVE that doesn't do info FIFO entries is available.
870 */
871 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
872
873 /* MOVE DECO Alignment -> C1 Context 16 bytes */
874 append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);
875
876 /* re-enable info fifo entries */
877 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
878
879 /* MOVE C1 Context -> OFIFO 16 bytes */
880 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);
881
882 append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
883
884 return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
885}
886
887struct caam_alg_template {
888 char name[CRYPTO_MAX_ALG_NAME];
889 char driver_name[CRYPTO_MAX_ALG_NAME];
890 unsigned int blocksize;
891 struct aead_alg aead;
892 u32 class1_alg_type;
893 u32 class2_alg_type;
894 u32 alg_op;
895};
896
897static struct caam_alg_template driver_algs[] = {
898 /* single-pass ipsec_esp descriptor */
899 {
900 .name = "authenc(hmac(sha1),cbc(aes))",
901 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
902 .blocksize = AES_BLOCK_SIZE,
903 .aead = {
904 .setkey = aead_authenc_setkey,
905 .setauthsize = aead_authenc_setauthsize,
906 .encrypt = aead_authenc_encrypt,
907 .decrypt = aead_authenc_decrypt,
908 .givencrypt = aead_authenc_givencrypt,
909 .geniv = "<built-in>",
910 .ivsize = AES_BLOCK_SIZE,
911 .maxauthsize = SHA1_DIGEST_SIZE,
912 },
913 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
914 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
915 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
916 },
917 {
918 .name = "authenc(hmac(sha256),cbc(aes))",
919 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
920 .blocksize = AES_BLOCK_SIZE,
921 .aead = {
922 .setkey = aead_authenc_setkey,
923 .setauthsize = aead_authenc_setauthsize,
924 .encrypt = aead_authenc_encrypt,
925 .decrypt = aead_authenc_decrypt,
926 .givencrypt = aead_authenc_givencrypt,
927 .geniv = "<built-in>",
928 .ivsize = AES_BLOCK_SIZE,
929 .maxauthsize = SHA256_DIGEST_SIZE,
930 },
931 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
932 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
933 OP_ALG_AAI_HMAC_PRECOMP,
934 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
935 },
936 {
937 .name = "authenc(hmac(sha512),cbc(aes))",
938 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
939 .blocksize = AES_BLOCK_SIZE,
940 .aead = {
941 .setkey = aead_authenc_setkey,
942 .setauthsize = aead_authenc_setauthsize,
943 .encrypt = aead_authenc_encrypt,
944 .decrypt = aead_authenc_decrypt,
945 .givencrypt = aead_authenc_givencrypt,
946 .geniv = "<built-in>",
947 .ivsize = AES_BLOCK_SIZE,
948 .maxauthsize = SHA512_DIGEST_SIZE,
949 },
950 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
951 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
952 OP_ALG_AAI_HMAC_PRECOMP,
953 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
954 },
955 {
956 .name = "authenc(hmac(sha1),cbc(des3_ede))",
957 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
958 .blocksize = DES3_EDE_BLOCK_SIZE,
959 .aead = {
960 .setkey = aead_authenc_setkey,
961 .setauthsize = aead_authenc_setauthsize,
962 .encrypt = aead_authenc_encrypt,
963 .decrypt = aead_authenc_decrypt,
964 .givencrypt = aead_authenc_givencrypt,
965 .geniv = "<built-in>",
966 .ivsize = DES3_EDE_BLOCK_SIZE,
967 .maxauthsize = SHA1_DIGEST_SIZE,
968 },
969 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
970 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
971 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
972 },
973 {
974 .name = "authenc(hmac(sha256),cbc(des3_ede))",
975 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
976 .blocksize = DES3_EDE_BLOCK_SIZE,
977 .aead = {
978 .setkey = aead_authenc_setkey,
979 .setauthsize = aead_authenc_setauthsize,
980 .encrypt = aead_authenc_encrypt,
981 .decrypt = aead_authenc_decrypt,
982 .givencrypt = aead_authenc_givencrypt,
983 .geniv = "<built-in>",
984 .ivsize = DES3_EDE_BLOCK_SIZE,
985 .maxauthsize = SHA256_DIGEST_SIZE,
986 },
987 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
988 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
989 OP_ALG_AAI_HMAC_PRECOMP,
990 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
991 },
992 {
993 .name = "authenc(hmac(sha512),cbc(des3_ede))",
994 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
995 .blocksize = DES3_EDE_BLOCK_SIZE,
996 .aead = {
997 .setkey = aead_authenc_setkey,
998 .setauthsize = aead_authenc_setauthsize,
999 .encrypt = aead_authenc_encrypt,
1000 .decrypt = aead_authenc_decrypt,
1001 .givencrypt = aead_authenc_givencrypt,
1002 .geniv = "<built-in>",
1003 .ivsize = DES3_EDE_BLOCK_SIZE,
1004 .maxauthsize = SHA512_DIGEST_SIZE,
1005 },
1006 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1007 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1008 OP_ALG_AAI_HMAC_PRECOMP,
1009 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1010 },
1011 {
1012 .name = "authenc(hmac(sha1),cbc(des))",
1013 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1014 .blocksize = DES_BLOCK_SIZE,
1015 .aead = {
1016 .setkey = aead_authenc_setkey,
1017 .setauthsize = aead_authenc_setauthsize,
1018 .encrypt = aead_authenc_encrypt,
1019 .decrypt = aead_authenc_decrypt,
1020 .givencrypt = aead_authenc_givencrypt,
1021 .geniv = "<built-in>",
1022 .ivsize = DES_BLOCK_SIZE,
1023 .maxauthsize = SHA1_DIGEST_SIZE,
1024 },
1025 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1026 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1027 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1028 },
1029 {
1030 .name = "authenc(hmac(sha256),cbc(des))",
1031 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1032 .blocksize = DES_BLOCK_SIZE,
1033 .aead = {
1034 .setkey = aead_authenc_setkey,
1035 .setauthsize = aead_authenc_setauthsize,
1036 .encrypt = aead_authenc_encrypt,
1037 .decrypt = aead_authenc_decrypt,
1038 .givencrypt = aead_authenc_givencrypt,
1039 .geniv = "<built-in>",
1040 .ivsize = DES_BLOCK_SIZE,
1041 .maxauthsize = SHA256_DIGEST_SIZE,
1042 },
1043 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1044 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1045 OP_ALG_AAI_HMAC_PRECOMP,
1046 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1047 },
1048 {
1049 .name = "authenc(hmac(sha512),cbc(des))",
1050 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1051 .blocksize = DES_BLOCK_SIZE,
1052 .aead = {
1053 .setkey = aead_authenc_setkey,
1054 .setauthsize = aead_authenc_setauthsize,
1055 .encrypt = aead_authenc_encrypt,
1056 .decrypt = aead_authenc_decrypt,
1057 .givencrypt = aead_authenc_givencrypt,
1058 .geniv = "<built-in>",
1059 .ivsize = DES_BLOCK_SIZE,
1060 .maxauthsize = SHA512_DIGEST_SIZE,
1061 },
1062 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1063 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1064 OP_ALG_AAI_HMAC_PRECOMP,
1065 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1066 },
1067};
1068
1069struct caam_crypto_alg {
1070 struct list_head entry;
1071 struct device *ctrldev;
1072 int class1_alg_type;
1073 int class2_alg_type;
1074 int alg_op;
1075 struct crypto_alg crypto_alg;
1076};
1077
1078static int caam_cra_init(struct crypto_tfm *tfm)
1079{
1080 struct crypto_alg *alg = tfm->__crt_alg;
1081 struct caam_crypto_alg *caam_alg =
1082 container_of(alg, struct caam_crypto_alg, crypto_alg);
1083 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
1084 struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
1085 int tgt_jr = atomic_inc_return(&priv->tfm_count);
1086
1087 /*
1088 * distribute tfms across job rings to ensure in-order
1089 * crypto request processing per tfm
1090 */
1091 ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
1092
1093 /* copy descriptor header template value */
1094 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
1095 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
1096 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
1097
1098 return 0;
1099}
1100
1101static void caam_cra_exit(struct crypto_tfm *tfm)
1102{
1103 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
1104
1105 if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
1106 dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
1107 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
1108 kfree(ctx->sh_desc);
1109
1110 if (!dma_mapping_error(ctx->jrdev, ctx->key_phys))
1111 dma_unmap_single(ctx->jrdev, ctx->key_phys,
1112 ctx->split_key_pad_len + ctx->enckeylen,
1113 DMA_TO_DEVICE);
1114 kfree(ctx->key);
1115}
1116
1117static void __exit caam_algapi_exit(void)
1118{
1119
1120 struct device_node *dev_node;
1121 struct platform_device *pdev;
1122 struct device *ctrldev;
1123 struct caam_drv_private *priv;
1124 struct caam_crypto_alg *t_alg, *n;
1125 int i, err;
1126
1127 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1128 if (!dev_node)
1129 return;
1130
1131 pdev = of_find_device_by_node(dev_node);
1132 if (!pdev)
1133 return;
1134
1135 ctrldev = &pdev->dev;
1136 of_node_put(dev_node);
1137 priv = dev_get_drvdata(ctrldev);
1138
1139 if (!priv->alg_list.next)
1140 return;
1141
1142 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1143 crypto_unregister_alg(&t_alg->crypto_alg);
1144 list_del(&t_alg->entry);
1145 kfree(t_alg);
1146 }
1147
1148 for (i = 0; i < priv->total_jobrs; i++) {
1149 err = caam_jr_deregister(priv->algapi_jr[i]);
1150 if (err < 0)
1151 break;
1152 }
1153 kfree(priv->algapi_jr);
1154}
1155
1156static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
1157 struct caam_alg_template
1158 *template)
1159{
1160 struct caam_crypto_alg *t_alg;
1161 struct crypto_alg *alg;
1162
1163 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
1164 if (!t_alg) {
1165 dev_err(ctrldev, "failed to allocate t_alg\n");
1166 return ERR_PTR(-ENOMEM);
1167 }
1168
1169 alg = &t_alg->crypto_alg;
1170
1171 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1172 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1173 template->driver_name);
1174 alg->cra_module = THIS_MODULE;
1175 alg->cra_init = caam_cra_init;
1176 alg->cra_exit = caam_cra_exit;
1177 alg->cra_priority = CAAM_CRA_PRIORITY;
1178 alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
1179 alg->cra_blocksize = template->blocksize;
1180 alg->cra_alignmask = 0;
1181 alg->cra_type = &crypto_aead_type;
1182 alg->cra_ctxsize = sizeof(struct caam_ctx);
1183 alg->cra_u.aead = template->aead;
1184
1185 t_alg->class1_alg_type = template->class1_alg_type;
1186 t_alg->class2_alg_type = template->class2_alg_type;
1187 t_alg->alg_op = template->alg_op;
1188 t_alg->ctrldev = ctrldev;
1189
1190 return t_alg;
1191}
1192
1193static int __init caam_algapi_init(void)
1194{
1195 struct device_node *dev_node;
1196 struct platform_device *pdev;
1197 struct device *ctrldev, **jrdev;
1198 struct caam_drv_private *priv;
1199 int i = 0, err = 0;
1200
1201 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1202 if (!dev_node)
1203 return -ENODEV;
1204
1205 pdev = of_find_device_by_node(dev_node);
1206 if (!pdev)
1207 return -ENODEV;
1208
1209 ctrldev = &pdev->dev;
1210 priv = dev_get_drvdata(ctrldev);
1211 of_node_put(dev_node);
1212
1213 INIT_LIST_HEAD(&priv->alg_list);
1214
1215 jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
1216 if (!jrdev)
1217 return -ENOMEM;
1218
1219 for (i = 0; i < priv->total_jobrs; i++) {
1220 err = caam_jr_register(ctrldev, &jrdev[i]);
1221 if (err < 0)
1222 break;
1223 }
1224 if (err < 0 && i == 0) {
1225 dev_err(ctrldev, "algapi error in job ring registration: %d\n",
1226 err);
1227 kfree(jrdev);
1228 return err;
1229 }
1230
1231 priv->num_jrs_for_algapi = i;
1232 priv->algapi_jr = jrdev;
1233 atomic_set(&priv->tfm_count, -1);
1234
1235 /* register crypto algorithms the device supports */
1236 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1237 /* TODO: check if h/w supports alg */
1238 struct caam_crypto_alg *t_alg;
1239
1240 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
1241 if (IS_ERR(t_alg)) {
1242 err = PTR_ERR(t_alg);
1243 dev_warn(ctrldev, "%s alg allocation failed\n",
1244 driver_algs[i].driver_name);
1245 continue;
1246 }
1247
1248 err = crypto_register_alg(&t_alg->crypto_alg);
1249 if (err) {
1250 dev_warn(ctrldev, "%s alg registration failed\n",
1251 t_alg->crypto_alg.cra_driver_name);
1252 kfree(t_alg);
1253 } else {
1254 list_add_tail(&t_alg->entry, &priv->alg_list);
1255 dev_info(ctrldev, "%s\n",
1256 t_alg->crypto_alg.cra_driver_name);
1257 }
1258 }
1259
1260 return err;
1261}
1262
1263module_init(caam_algapi_init);
1264module_exit(caam_algapi_exit);
1265
1266MODULE_LICENSE("GPL");
1267MODULE_DESCRIPTION("FSL CAAM support for crypto API");
1268MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
new file mode 100644
index 000000000000..950450346f70
--- /dev/null
+++ b/drivers/crypto/caam/compat.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2008-2011 Freescale Semiconductor, Inc.
3 */
4
5#ifndef CAAM_COMPAT_H
6#define CAAM_COMPAT_H
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/mod_devicetable.h>
11#include <linux/device.h>
12#include <linux/interrupt.h>
13#include <linux/crypto.h>
14#include <linux/hw_random.h>
15#include <linux/of_platform.h>
16#include <linux/dma-mapping.h>
17#include <linux/io.h>
18#include <linux/spinlock.h>
19#include <linux/rtnetlink.h>
20#include <linux/in.h>
21#include <linux/slab.h>
22#include <linux/types.h>
23#include <linux/debugfs.h>
24#include <linux/circ_buf.h>
25#include <net/xfrm.h>
26
27#include <crypto/algapi.h>
28#include <crypto/aes.h>
29#include <crypto/des.h>
30#include <crypto/sha.h>
31#include <crypto/aead.h>
32#include <crypto/authenc.h>
33#include <crypto/scatterwalk.h>
34
35#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
new file mode 100644
index 000000000000..9009713a3c2e
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.c
@@ -0,0 +1,269 @@
1/*
2 * CAAM control-plane driver backend
3 * Controller-level driver, kernel property detection, initialization
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#include "compat.h"
9#include "regs.h"
10#include "intern.h"
11#include "jr.h"
12
13static int caam_remove(struct platform_device *pdev)
14{
15 struct device *ctrldev;
16 struct caam_drv_private *ctrlpriv;
17 struct caam_drv_private_jr *jrpriv;
18 struct caam_full __iomem *topregs;
19 int ring, ret = 0;
20
21 ctrldev = &pdev->dev;
22 ctrlpriv = dev_get_drvdata(ctrldev);
23 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
24
25 /* shut down JobRs */
26 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
27 ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
28 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
29 irq_dispose_mapping(jrpriv->irq);
30 }
31
32 /* Shut down debug views */
33#ifdef CONFIG_DEBUG_FS
34 debugfs_remove_recursive(ctrlpriv->dfs_root);
35#endif
36
37 /* Unmap controller region */
38 iounmap(&topregs->ctrl);
39
40 kfree(ctrlpriv->jrdev);
41 kfree(ctrlpriv);
42
43 return ret;
44}
45
46/* Probe routine for CAAM top (controller) level */
47static int caam_probe(struct platform_device *pdev)
48{
49 int d, ring, rspec;
50 struct device *dev;
51 struct device_node *nprop, *np;
52 struct caam_ctrl __iomem *ctrl;
53 struct caam_full __iomem *topregs;
54 struct caam_drv_private *ctrlpriv;
55 struct caam_perfmon *perfmon;
56 struct caam_deco **deco;
57 u32 deconum;
58
59 ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
60 if (!ctrlpriv)
61 return -ENOMEM;
62
63 dev = &pdev->dev;
64 dev_set_drvdata(dev, ctrlpriv);
65 ctrlpriv->pdev = pdev;
66 nprop = pdev->dev.of_node;
67
68 /* Get configuration properties from device tree */
69 /* First, get register page */
70 ctrl = of_iomap(nprop, 0);
71 if (ctrl == NULL) {
72 dev_err(dev, "caam: of_iomap() failed\n");
73 return -ENOMEM;
74 }
75 ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
76
77 /* topregs used to derive pointers to CAAM sub-blocks only */
78 topregs = (struct caam_full __iomem *)ctrl;
79
80 /* Get the IRQ of the controller (for security violations only) */
81 ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
82
83 /*
84 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
85 * 36-bit pointers in master configuration register
86 */
87 setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
88 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
89
90 if (sizeof(dma_addr_t) == sizeof(u64))
91 dma_set_mask(dev, DMA_BIT_MASK(36));
92
93 /* Find out how many DECOs are present */
94 deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
95 CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
96
97 ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
98 GFP_KERNEL);
99
100 deco = (struct caam_deco __force **)&topregs->deco;
101 for (d = 0; d < deconum; d++)
102 ctrlpriv->deco[d] = deco[d];
103
104 /*
105 * Detect and enable JobRs
106 * First, find out how many ring spec'ed, allocate references
107 * for all, then go probe each one.
108 */
109 rspec = 0;
110 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
111 rspec++;
112 ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
113 if (ctrlpriv->jrdev == NULL) {
114 iounmap(&topregs->ctrl);
115 return -ENOMEM;
116 }
117
118 ring = 0;
119 ctrlpriv->total_jobrs = 0;
120 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
121 caam_jr_probe(pdev, np, ring);
122 ctrlpriv->total_jobrs++;
123 ring++;
124 }
125
126 /* Check to see if QI present. If so, enable */
127 ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
128 CTPR_QI_MASK);
129 if (ctrlpriv->qi_present) {
130 ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
131 /* This is all that's required to physically enable QI */
132 wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
133 }
134
135 /* If no QI and no rings specified, quit and go home */
136 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
137 dev_err(dev, "no queues configured, terminating\n");
138 caam_remove(pdev);
139 return -ENOMEM;
140 }
141
142 /* NOTE: RTIC detection ought to go here, around Si time */
143
144 /* Initialize queue allocator lock */
145 spin_lock_init(&ctrlpriv->jr_alloc_lock);
146
147 /* Report "alive" for developer to see */
148 dev_info(dev, "device ID = 0x%016llx\n",
149 rd_reg64(&topregs->ctrl.perfmon.caam_id));
150 dev_info(dev, "job rings = %d, qi = %d\n",
151 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
152
153#ifdef CONFIG_DEBUG_FS
154 /*
155	 * FIXME: needs a better naming scheme, e.g. some amalgamation of
156	 * "caam" and nprop->full_name. The OF name isn't distinctive,
157	 * but does separate instances.
158 */
159 perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
160
161 ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
162 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
163
164 /* Controller-level - performance monitor counters */
165 ctrlpriv->ctl_rq_dequeued =
166 debugfs_create_u64("rq_dequeued",
167 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
168 ctrlpriv->ctl, &perfmon->req_dequeued);
169 ctrlpriv->ctl_ob_enc_req =
170 debugfs_create_u64("ob_rq_encrypted",
171 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
172 ctrlpriv->ctl, &perfmon->ob_enc_req);
173 ctrlpriv->ctl_ib_dec_req =
174 debugfs_create_u64("ib_rq_decrypted",
175 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
176 ctrlpriv->ctl, &perfmon->ib_dec_req);
177 ctrlpriv->ctl_ob_enc_bytes =
178 debugfs_create_u64("ob_bytes_encrypted",
179 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
180 ctrlpriv->ctl, &perfmon->ob_enc_bytes);
181 ctrlpriv->ctl_ob_prot_bytes =
182 debugfs_create_u64("ob_bytes_protected",
183 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
184 ctrlpriv->ctl, &perfmon->ob_prot_bytes);
185 ctrlpriv->ctl_ib_dec_bytes =
186 debugfs_create_u64("ib_bytes_decrypted",
187 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
188 ctrlpriv->ctl, &perfmon->ib_dec_bytes);
189 ctrlpriv->ctl_ib_valid_bytes =
190 debugfs_create_u64("ib_bytes_validated",
191 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
192 ctrlpriv->ctl, &perfmon->ib_valid_bytes);
193
194 /* Controller level - global status values */
195 ctrlpriv->ctl_faultaddr =
196 debugfs_create_u64("fault_addr",
197 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
198 ctrlpriv->ctl, &perfmon->faultaddr);
199 ctrlpriv->ctl_faultdetail =
200 debugfs_create_u32("fault_detail",
201 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
202 ctrlpriv->ctl, &perfmon->faultdetail);
203 ctrlpriv->ctl_faultstatus =
204 debugfs_create_u32("fault_status",
205 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
206 ctrlpriv->ctl, &perfmon->status);
207
208 /* Internal covering keys (useful in non-secure mode only) */
209 ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
210 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
211 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
212 S_IFCHR | S_IRUSR |
213 S_IRGRP | S_IROTH,
214 ctrlpriv->ctl,
215 &ctrlpriv->ctl_kek_wrap);
216
217 ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
218 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
219 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
220 S_IFCHR | S_IRUSR |
221 S_IRGRP | S_IROTH,
222 ctrlpriv->ctl,
223 &ctrlpriv->ctl_tkek_wrap);
224
225 ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
226 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
227 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
228 S_IFCHR | S_IRUSR |
229 S_IRGRP | S_IROTH,
230 ctrlpriv->ctl,
231 &ctrlpriv->ctl_tdsk_wrap);
232#endif
233 return 0;
234}
235
236static struct of_device_id caam_match[] = {
237 {
238 .compatible = "fsl,sec-v4.0",
239 },
240 {},
241};
242MODULE_DEVICE_TABLE(of, caam_match);
243
244static struct platform_driver caam_driver = {
245 .driver = {
246 .name = "caam",
247 .owner = THIS_MODULE,
248 .of_match_table = caam_match,
249 },
250 .probe = caam_probe,
251 .remove = __devexit_p(caam_remove),
252};
253
254static int __init caam_base_init(void)
255{
256 return platform_driver_register(&caam_driver);
257}
258
259static void __exit caam_base_exit(void)
260{
261 return platform_driver_unregister(&caam_driver);
262}
263
264module_init(caam_base_init);
265module_exit(caam_base_exit);
266
267MODULE_LICENSE("GPL");
268MODULE_DESCRIPTION("FSL CAAM request backend");
269MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
new file mode 100644
index 000000000000..974a75842da9
--- /dev/null
+++ b/drivers/crypto/caam/desc.h
@@ -0,0 +1,1605 @@
1/*
2 * CAAM descriptor composition header
3 * Definitions to support CAAM descriptor instruction generation
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#ifndef DESC_H
9#define DESC_H
10
11/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
12#define MAX_CAAM_DESCSIZE 64
13
14/* Block size of any entity covered/uncovered with a KEK/TKEK */
15#define KEK_BLOCKSIZE 16
16
17/*
18 * Supported descriptor command types as they show up
19 * inside a descriptor command word.
20 */
21#define CMD_SHIFT 27
22#define CMD_MASK 0xf8000000
23
24#define CMD_KEY (0x00 << CMD_SHIFT)
25#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
26#define CMD_LOAD (0x02 << CMD_SHIFT)
27#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
28#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
29#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
30#define CMD_STORE (0x0a << CMD_SHIFT)
31#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
32#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
33#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
34#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
35#define CMD_MOVE (0x0f << CMD_SHIFT)
36#define CMD_OPERATION (0x10 << CMD_SHIFT)
37#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
38#define CMD_JUMP (0x14 << CMD_SHIFT)
39#define CMD_MATH (0x15 << CMD_SHIFT)
40#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
41#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
42#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
43#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
44
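/*
 * Example (illustrative): a command word is built by OR'ing a command
 * type with its class/modifier fields and length, e.g. a FIFO LOAD of
 * an ICV into the class 2 CHA, as done by this driver:
 *
 *	u32 cmd = CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS2 |
 *		  FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV | len;
 */
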
45/* General-purpose class selector for all commands */
46#define CLASS_SHIFT 25
47#define CLASS_MASK (0x03 << CLASS_SHIFT)
48
49#define CLASS_NONE (0x00 << CLASS_SHIFT)
50#define CLASS_1 (0x01 << CLASS_SHIFT)
51#define CLASS_2 (0x02 << CLASS_SHIFT)
52#define CLASS_BOTH (0x03 << CLASS_SHIFT)
53
54/*
55 * Descriptor header command constructs
56 * Covers shared, job, and trusted descriptor headers
57 */
58
59/*
60 * Do Not Run - marks a descriptor inexecutable if there was
61 * a preceding error somewhere
62 */
63#define HDR_DNR 0x01000000
64
65/*
66 * ONE - should always be set. Combination of ONE (always
67 * set) and ZRO (always clear) forms an endianness sanity check
68 */
69#define HDR_ONE 0x00800000
70#define HDR_ZRO 0x00008000
71
72/* Start Index or SharedDesc Length */
73#define HDR_START_IDX_MASK 0x3f
74#define HDR_START_IDX_SHIFT 16
75
76/* If shared descriptor header, 6-bit length */
77#define HDR_DESCLEN_SHR_MASK 0x3f
78
79/* If non-shared header, 7-bit length */
80#define HDR_DESCLEN_MASK 0x7f
81
82/* This is a TrustedDesc (if not SharedDesc) */
83#define HDR_TRUSTED 0x00004000
84
85/* Make into TrustedDesc (if not SharedDesc) */
86#define HDR_MAKE_TRUSTED 0x00002000
87
88/* Save context if self-shared (if SharedDesc) */
89#define HDR_SAVECTX 0x00001000
90
91/* Next item points to SharedDesc */
92#define HDR_SHARED 0x00001000
93
94/*
95 * Reverse Execution Order - execute JobDesc first, then
96 * execute SharedDesc (normally SharedDesc goes first).
97 */
98#define HDR_REVERSE 0x00000800
99
100/* Propagate DNR property to SharedDesc */
101#define HDR_PROP_DNR 0x00000800
102
103/* JobDesc/SharedDesc share property */
104#define HDR_SD_SHARE_MASK 0x03
105#define HDR_SD_SHARE_SHIFT 8
106#define HDR_JD_SHARE_MASK 0x07
107#define HDR_JD_SHARE_SHIFT 8
108
109#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
110#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
111#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
112#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
113#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
114
115/* JobDesc/SharedDesc descriptor length */
116#define HDR_JD_LENGTH_MASK 0x7f
117#define HDR_SD_LENGTH_MASK 0x3f
118
119/*
120 * KEY/SEQ_KEY Command Constructs
121 */
122
123/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
124#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
125#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
126
127/* Scatter-Gather Table/Variable Length Field */
128#define KEY_SGF 0x01000000
129#define KEY_VLF 0x01000000
130
131/* Immediate - Key follows command in the descriptor */
132#define KEY_IMM 0x00800000
133
134/*
135 * Encrypted - Key is encrypted either with the KEK, or
136 * with the TDKEK if TK is set
137 */
138#define KEY_ENC 0x00400000
139
140/*
141 * No Write Back - Do not allow key to be FIFO STOREd
142 */
143#define KEY_NWB 0x00200000
144
145/*
146 * Enhanced Encryption of Key
147 */
148#define KEY_EKT 0x00100000
149
150/*
151 * Encrypted with Trusted Key
152 */
153#define KEY_TK 0x00008000
154
155/*
156 * KDEST - Key Destination: 0 - class key register,
157 * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
158 */
159#define KEY_DEST_SHIFT 16
160#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
161
162#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
163#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
164#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
165#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
166
167/* Length in bytes */
168#define KEY_LENGTH_MASK 0x000003ff
169
170/*
171 * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
172 */
173
174/*
175 * Load/Store Destination: 0 = class independent CCB,
176 * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
177 */
178#define LDST_CLASS_SHIFT 25
179#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
180#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
181#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
182#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
183#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
184
185/* Scatter-Gather Table/Variable Length Field */
186#define LDST_SGF 0x01000000
187#define LDST_VLF LDST_SGF
188
189/* Immediate - Data follows this command in the descriptor */
190#define LDST_IMM_MASK 1
191#define LDST_IMM_SHIFT 23
192#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
193
194/* SRC/DST - Destination for LOAD, Source for STORE */
195#define LDST_SRCDST_SHIFT 16
196#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
197
198#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
199#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
200#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
201#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
202
203#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
204#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
205#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
206#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
207#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
208#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
209#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
210#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
211#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
212#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
213#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
214#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
215#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
216#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
217#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
218#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
219#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
220#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
221#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
222#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
223#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
224#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
225#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
226
227/* Offset in source/destination */
228#define LDST_OFFSET_SHIFT 8
229#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
230
231/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
232/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
233#define LDOFF_CHG_SHARE_SHIFT 0
234#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
235#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
236#define LDOFF_CHG_SHARE_OK_NO_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
237#define LDOFF_CHG_SHARE_OK_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
238
239#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
240#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
241
242#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
243#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
244#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
245#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
246#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
247
248#define LDOFF_CHG_SEQLIODN_SHIFT 6
249#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
250#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
251#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
252#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
253
254/* Data length in bytes */
255#define LDST_LEN_SHIFT 0
256#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
257
258/* Special Length definitions when dst=deco-ctrl */
259#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
260#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
261#define LDLEN_RST_OFIFO (1 << 5)
262#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
263#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
264#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
265#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
266
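/*
 * Example (from this driver's own usage): loading an IV into the
 * class 1 context register is a LOAD targeting the class 1 CCB's
 * byte-addressable context:
 *
 *	append_load(desc, iv_dma, ivsize,
 *		    LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
 */
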
267/*
268 * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
269 * Command Constructs
270 */
271
272/*
273 * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
274 * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
275 * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
276 */
277#define FIFOLD_CLASS_SHIFT 25
278#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
279#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
280#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
281#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
282#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
283
284#define FIFOST_CLASS_SHIFT 25
285#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
286#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
287#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
288#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
289
290/*
291 * Scatter-Gather Table/Variable Length Field
292 * If set for FIFO_LOAD, refers to an SG table. Within
293 * SEQ_FIFO_LOAD, indicates a variable-length input sequence
294 */
295#define FIFOLDST_SGF_SHIFT 24
296#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
297#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
298#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
299#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
300
301/* Immediate - Data follows command in descriptor */
302#define FIFOLD_IMM_SHIFT 23
303#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
304#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
305
306/* Continue - Not the last FIFO store to come */
307#define FIFOST_CONT_SHIFT 23
308#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
309#define FIFOST_CONT		(1 << FIFOST_CONT_SHIFT)
310
311/*
312 * Extended Length - use 32-bit extended length that
313 * follows the pointer field. Illegal with IMM set
314 */
315#define FIFOLDST_EXT_SHIFT 22
316#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
317#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
318
319/* Input data type */
320#define FIFOLD_TYPE_SHIFT 16
321#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
322#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
323
324/* PK types */
325#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
326#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
327#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
328#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
329#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
330#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
331#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
332#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
333#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
334#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
335#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
336#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
337#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
338#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
339
340/* Other types. Need to OR in last/flush bits as desired */
341#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
342#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
343#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
344#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
345#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
346#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
347#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
348
349/* Last/Flush bits for use with "other" types above */
350#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
351#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
352#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
353#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
354#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
355#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
356#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
357#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
358#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
359
360#define FIFOLDST_LEN_MASK 0xffff
361#define FIFOLDST_EXT_LEN_MASK 0xffffffff
362
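/*
 * Example (from this driver's own usage): when encrypting, the payload
 * is loaded to both classes with class 1 output snooped into class 2,
 * and the last/flush action bits are OR'ed into the type:
 *
 *	FIFOLD_CLASS_BOTH | FIFOLD_TYPE_LASTBOTH | FIFOLD_TYPE_MSG1OUT2
 */
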
363/* Output data types */
364#define FIFOST_TYPE_SHIFT 16
365#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
366
367#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
368#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
369#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
370#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
371#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
372#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
373#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
374#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
375#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
376#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
377#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
378#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
379#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
380#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
381#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
382#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
383#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
384#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
385#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
386#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
387#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
388#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
389#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
390#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
391#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
392
393/*
394 * OPERATION Command Constructs
395 */
396
397/* Operation type selectors - OP TYPE */
398#define OP_TYPE_SHIFT 24
399#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
400
401#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
402#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
403#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
404#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
405#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
406#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
407
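/*
 * Example (composed from this driver's usage): a class 1 CBC-AES
 * encrypt operation word combines type, algorithm selector, mode and
 * state/direction bits (the OP_ALG_* fields are defined below):
 *
 *	OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC |
 *	OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT
 */
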
408/* ProtocolID selectors - PROTID */
409#define OP_PCLID_SHIFT 16
410#define OP_PCLID_MASK			(0xff << OP_PCLID_SHIFT)
411
412/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
413#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
414#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
415#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
416#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
417#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
418#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
419#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
420#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
421#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
422#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
423#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
424#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
425
426/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
427#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
428#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
429#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
430#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
431#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
432#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
433#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
434#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
435#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
436#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
437
438/*
439 * ProtocolInfo selectors
440 */
441#define OP_PCLINFO_MASK 0xffff
442
443/* for OP_PCLID_IPSEC */
444#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
445#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
446
447#define OP_PCL_IPSEC_DES_IV64 0x0100
448#define OP_PCL_IPSEC_DES 0x0200
449#define OP_PCL_IPSEC_3DES 0x0300
450#define OP_PCL_IPSEC_AES_CBC 0x0c00
451#define OP_PCL_IPSEC_AES_CTR 0x0d00
452#define OP_PCL_IPSEC_AES_XTS 0x1600
453#define OP_PCL_IPSEC_AES_CCM8 0x0e00
454#define OP_PCL_IPSEC_AES_CCM12 0x0f00
455#define OP_PCL_IPSEC_AES_CCM16 0x1000
456#define OP_PCL_IPSEC_AES_GCM8 0x1200
457#define OP_PCL_IPSEC_AES_GCM12 0x1300
458#define OP_PCL_IPSEC_AES_GCM16 0x1400
459
460#define OP_PCL_IPSEC_HMAC_NULL 0x0000
461#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
462#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
463#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
464#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
465#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
466#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
467#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
468#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
469
470/* For SRTP - OP_PCLID_SRTP */
471#define OP_PCL_SRTP_CIPHER_MASK 0xff00
472#define OP_PCL_SRTP_AUTH_MASK 0x00ff
473
474#define OP_PCL_SRTP_AES_CTR 0x0d00
475
476#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
477
478/* For SSL 3.0 - OP_PCLID_SSL30 */
479#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
480#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
481#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
482#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
483#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
484#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
485#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
486#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
487#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
488#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
489#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
490#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
491#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
492#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
493#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
494#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
495#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
496
497#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
498#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
499#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
500#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
501#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
502#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
503#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
504#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
505#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
506#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
507#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
508#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
509#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
510#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
511#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
512#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
513#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
514
515#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
516
517#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
518#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
519#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
520#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
521#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
522#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
523#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
524#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
525#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
526#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
527#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
528#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
529#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
530#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
531#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
532#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
533#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
534#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
535
536#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
537
538#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
539
540#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
541#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
542#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
543#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
544#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
545#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
546#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
547
548#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
549#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
550#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
551#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
552#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
553#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
554#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
555
556#define OP_PCL_SSL30_RC4_128_MD5 0x0024
557#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
558#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
559
560#define OP_PCL_SSL30_RC4_40_MD5 0x002b
561#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
562#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
563
564#define OP_PCL_SSL30_RC4_128_SHA 0x0020
565#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
566#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
567#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
568#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
569#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
570#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
571#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
572#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
573#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
574
575#define OP_PCL_SSL30_RC4_40_SHA 0x0028
576
577
578/* For TLS 1.0 - OP_PCLID_TLS10 */
579#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
580#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
581#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
582#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
583#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
584#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
585#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
586#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
587#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
588#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
589#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
590#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
591#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
592#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
593#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
594#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
595#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
596
597#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
598#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
599#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
600#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
601#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
602#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
603#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
604#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
605#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
606#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
607#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
608#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
609#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
610#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
611#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
612#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
613#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
614
615/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
616
617#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
618#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
619#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
620#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
621#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
622#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
623#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
624#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
625#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
626#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
627#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
628#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
629#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
630#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
631#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
632#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
633#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
634#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
635
636#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
637
638#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
639
640#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
641#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
642#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
643#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
644#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
645#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
646#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
647
648
649#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
650#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
651#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
652#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
653#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
654#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
655#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
656
657#define OP_PCL_TLS10_RC4_128_MD5 0x0024
658#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
659#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
660
661#define OP_PCL_TLS10_RC4_40_MD5 0x002b
662#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
663#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
664
665#define OP_PCL_TLS10_RC4_128_SHA 0x0020
666#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
667#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
668#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
669#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
670#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
671#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
672#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
673#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
674#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
675
676#define OP_PCL_TLS10_RC4_40_SHA 0x0028
677
678#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
679#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
680#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
681#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
682#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
683#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
684#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
685#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
686#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
687#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
688#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
689#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
690#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
691#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
692#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
693#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
694#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
695#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
696#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
697#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
698#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
699
700
701
702/* For TLS 1.1 - OP_PCLID_TLS11 */
703#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
704#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
705#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
706#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
707#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
708#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
709#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
710#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
711#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
712#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
713#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
714#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
715#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
716#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
717#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
718#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
719#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
720
721#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
722#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
723#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
724#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
725#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
726#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
727#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
728#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
729#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
730#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
731#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
732#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
733#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
734#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
735#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
736#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
737#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
738
739/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
740
741#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
742#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
743#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
744#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
745#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
746#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
747#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
748#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
749#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
750#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
751#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
752#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
753#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
754#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
755#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
756#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
757#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
758#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
759
760#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
761
762#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
763
764#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
765#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
766#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
767#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
768#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
769#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
770#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
771
772#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
773#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
774#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
775#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
776#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
777#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
778#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
779
780#define OP_PCL_TLS11_RC4_128_MD5 0x0024
781#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
782#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
783
784#define OP_PCL_TLS11_RC4_40_MD5 0x002b
785#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
786#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
787
788#define OP_PCL_TLS11_RC4_128_SHA 0x0020
789#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
790#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
791#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
792#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
793#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
794#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
795#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
796#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
797#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
798
799#define OP_PCL_TLS11_RC4_40_SHA 0x0028
800
801#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
802#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
803#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
804#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
805#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
806#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
807#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
808#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
809#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
810#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
811#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
812#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
813#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
814#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
815#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
816#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
817#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
818#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
819#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
820#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
821#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
822
823
824/* For TLS 1.2 - OP_PCLID_TLS12 */
825#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
826#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
827#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
828#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
829#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
830#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
831#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
832#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
833#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
834#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
835#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
836#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
837#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
838#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
839#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
840#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
841#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
842
843#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
844#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
845#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
846#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
847#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
848#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
849#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
850#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
851#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
852#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
853#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
854#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
855#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
856#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
857#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
858#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
859#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
860
861/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
862
863#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
864#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
865#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
866#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
867#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
868#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
869#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
870#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
871#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
872#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
873#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
874#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
875#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
876#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
877#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
878#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
879#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
880#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
881
882#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
883
884#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
885
886#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
887#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
888#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
889#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
890#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
891#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
892#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
893
894#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
895#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
896#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
897#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
898#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
899#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
900#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
901
902#define OP_PCL_TLS12_RC4_128_MD5 0x0024
903#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
904#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
905
906#define OP_PCL_TLS12_RC4_40_MD5 0x002b
907#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
908#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
909
910#define OP_PCL_TLS12_RC4_128_SHA 0x0020
911#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
912#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
913#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
914#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
915#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
916#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
917#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
918#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
919#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
920
921#define OP_PCL_TLS12_RC4_40_SHA 0x0028
922
923/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
924#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
925#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
926#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
927#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
928#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
929
930/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
931#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
932#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
933#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
934#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
935#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
936
937/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
938
939#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
940#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
941#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
942#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
943#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
944#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
945#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
946#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
947#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
948#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
949#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
950#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
951#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
952#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
953#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
954#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
955#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
956#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
957#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
958#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
959#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
960
961/* For DTLS - OP_PCLID_DTLS */
962
963#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
964#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
965#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
966#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
967#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
968#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
969#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
970#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
971#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
972#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
973#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
974#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
975#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
976#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
977#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
978#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
979#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
980
981#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
982#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
983#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
984#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
985#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
986#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
987#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
988#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
989#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
990#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
991#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
992#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
993#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
994#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
995#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
996#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
997#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
998
999/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
1000
1001#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
1002#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
1003#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
1004#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
1005#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
1006#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
1007#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
1008#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
1009#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
1010#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
1011#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
1012#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
1013#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
1014#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
1015#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
1016#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
1017#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
1018#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
1019
1020#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
1021
1022#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
1023
1024#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
1025#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
1026#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
1027#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
1028#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
1029#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
1030#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
1031
1032
1033#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
1034#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
1035#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
1036#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
1037#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
1038#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
1039#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
1040
1041
1042#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
1043#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
1044#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
1045#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
1046#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
1047#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
1048#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
1049#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
1050#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
1051#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
1052#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
1053#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
1054#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
1055#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
1056#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
1057#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
1058#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
1059#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
1060#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
1061#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
1062#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
1063
1064/* 802.16 WiMAX protinfos */
1065#define OP_PCL_WIMAX_OFDM 0x0201
1066#define OP_PCL_WIMAX_OFDMA 0x0231
1067
1068/* 802.11 WiFi protinfos */
1069#define OP_PCL_WIFI 0xac04
1070
1071/* MacSec protinfos */
1072#define OP_PCL_MACSEC 0x0001
1073
1074/* PKI unidirectional protocol protinfo bits */
1075#define OP_PCL_PKPROT_TEST 0x0008
1076#define OP_PCL_PKPROT_DECRYPT 0x0004
1077#define OP_PCL_PKPROT_ECC 0x0002
1078#define OP_PCL_PKPROT_F2M 0x0001
1079
1080/* For non-protocol/alg-only op commands */
1081#define OP_ALG_TYPE_SHIFT 24
1082#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
1083#define OP_ALG_TYPE_CLASS1 2
1084#define OP_ALG_TYPE_CLASS2 4
1085
1086#define OP_ALG_ALGSEL_SHIFT 16
1087#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
1088#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
1089#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
1090#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
1091#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
1092#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
1093#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
1094#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
1095#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
1096#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
1097#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
1098#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
1099#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
1100#define OP_ALG_ALGSEL_SNOW (0x60 << OP_ALG_ALGSEL_SHIFT)
1101#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
1102#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
1103#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
1104#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
1105
1106#define OP_ALG_AAI_SHIFT 4
1107#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
1108
1109/* blockcipher AAI set */
1110#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
1111#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
1112#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
1113#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
1114#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
1115#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
1116#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
1117#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
1118#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
1119#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
1120#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
1121#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
1122#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
1123#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
1124#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
1125#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
1126#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
1127#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
1128#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
1129#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
1130#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
1131#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
1132#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
1133#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
1134#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
1135#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
1136#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
1137#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
1138#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
1139
1140/* randomizer AAI set */
1141#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
1142#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT)
1143#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT)
1144
1145/* hmac/smac AAI set */
1146#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
1147#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
1148#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
1149#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
1150
1151/* CRC AAI set*/
1152#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
1153#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
1154#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
1155#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
1156#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
1157#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
1158
1159/* Kasumi/SNOW AAI set */
1160#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
1161#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
1162#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
1163#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
1164
1165
1166#define OP_ALG_AS_SHIFT 2
1167#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
1168#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
1169#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
1170#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
1171#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
1172
1173#define OP_ALG_ICV_SHIFT 1
1174#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
1175#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT)
1176#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT)
1177
1178#define OP_ALG_DIR_SHIFT 0
1179#define OP_ALG_DIR_MASK 1
1180#define OP_ALG_DECRYPT 0
1181#define OP_ALG_ENCRYPT 1
1182
1183/* PKHA algorithm type set */
1184#define OP_ALG_PK 0x00800000
1185#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
1186
1187/* PKHA mode clear memory functions */
1188#define OP_ALG_PKMODE_A_RAM 0x80000
1189#define OP_ALG_PKMODE_B_RAM 0x40000
1190#define OP_ALG_PKMODE_E_RAM 0x20000
1191#define OP_ALG_PKMODE_N_RAM 0x10000
1192#define OP_ALG_PKMODE_CLEARMEM 0x00001
1193
1194/* PKHA mode modular-arithmetic functions */
1195#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
1196#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
1197#define OP_ALG_PKMODE_MOD_F2M 0x20000
1198#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
1199#define OP_ALG_PKMODE_PRJECTV 0x00800
1200#define OP_ALG_PKMODE_TIME_EQ 0x400
1201#define OP_ALG_PKMODE_OUT_B 0x000
1202#define OP_ALG_PKMODE_OUT_A 0x100
1203#define OP_ALG_PKMODE_MOD_ADD 0x002
1204#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
1205#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
1206#define OP_ALG_PKMODE_MOD_MULT 0x005
1207#define OP_ALG_PKMODE_MOD_EXPO 0x006
1208#define OP_ALG_PKMODE_MOD_REDUCT 0x007
1209#define OP_ALG_PKMODE_MOD_INV 0x008
1210#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
1211#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
1212#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
1213#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
1214#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
1215#define OP_ALG_PKMODE_MOD_GCD 0x00e
1216#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
1217
1218/* PKHA mode copy-memory functions */
1219#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
1220#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1221#define OP_ALG_PKMODE_DST_REG_SHIFT 10
1222#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
1223#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
1224#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1225#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
1226#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1227
1228#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1229#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1230#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1231#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
1232#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
1233#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
1234#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
1235#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1236#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1237#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1238#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1239#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1240#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1241#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1242#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1243#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
1244#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
1245
1246/*
1247 * SEQ_IN_PTR Command Constructs
1248 */
1249
1250/* Release Buffers */
1251#define SQIN_RBS 0x04000000
1252
1253/* Sequence pointer is really a descriptor */
1254#define SQIN_INL 0x02000000
1255
1256/* Sequence pointer is a scatter-gather table */
1257#define SQIN_SGF 0x01000000
1258
1259/* Appends to a previous pointer */
1260#define SQIN_PRE 0x00800000
1261
1262/* Use extended length following pointer */
1263#define SQIN_EXT 0x00400000
1264
1265/* Restore sequence with pointer/length */
1266#define SQIN_RTO 0x00200000
1267
1268/* Replace job descriptor */
1269#define SQIN_RJD 0x00100000
1270
1271#define SQIN_LEN_SHIFT 0
1272#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
1273
1274/*
1275 * SEQ_OUT_PTR Command Constructs
1276 */
1277
1278/* Sequence pointer is a scatter-gather table */
1279#define SQOUT_SGF 0x01000000
1280
1281/* Appends to a previous pointer */
1282#define SQOUT_PRE 0x00800000
1283
1284/* Restore sequence with pointer/length */
1285#define SQOUT_RTO 0x00200000
1286
1287/* Use extended length following pointer */
1288#define SQOUT_EXT 0x00400000
1289
1290#define SQOUT_LEN_SHIFT 0
1291#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
1292
1293
1294/*
1295 * SIGNATURE Command Constructs
1296 */
1297
1298/* TYPE field is all that's relevant */
1299#define SIGN_TYPE_SHIFT 16
1300#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
1301
1302#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
1303#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
1304#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
1305#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
1306#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
1307#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
1308
1309/*
1310 * MOVE Command Constructs
1311 */
1312
1313#define MOVE_AUX_SHIFT 25
1314#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
1315#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
1316#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
1317
1318#define MOVE_WAITCOMP_SHIFT 24
1319#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
1320#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
1321
1322#define MOVE_SRC_SHIFT 20
1323#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
1324#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
1325#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
1326#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
1327#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
1328#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
1329#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
1330#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
1331#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
1332#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
1333#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
1334
1335#define MOVE_DEST_SHIFT 16
1336#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
1337#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
1338#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
1339#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
1340#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
1341#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
1342#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
1343#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
1344#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
1345#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
1346#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
1347#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
1348#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
1349#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
1350
1351#define MOVE_OFFSET_SHIFT 8
1352#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
1353
1354#define MOVE_LEN_SHIFT 0
1355#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
1356
1357#define MOVELEN_MRSEL_SHIFT 0
1358#define MOVELEN_MRSEL_MASK (0x3 << MOVELEN_MRSEL_SHIFT)
1359
1360/*
1361 * MATH Command Constructs
1362 */
1363
1364#define MATH_IFB_SHIFT 26
1365#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
1366#define MATH_IFB (1 << MATH_IFB_SHIFT)
1367
1368#define MATH_NFU_SHIFT 25
1369#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
1370#define MATH_NFU (1 << MATH_NFU_SHIFT)
1371
1372#define MATH_STL_SHIFT 24
1373#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
1374#define MATH_STL (1 << MATH_STL_SHIFT)
1375
1376/* Function selectors */
1377#define MATH_FUN_SHIFT 20
1378#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
1379#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
1380#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
1381#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
1382#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
1383#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
1384#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
1385#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
1386#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
1387#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
1388#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
1389#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
1390
1391/* Source 0 selectors */
1392#define MATH_SRC0_SHIFT 16
1393#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
1394#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
1395#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
1396#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
1397#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
1398#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
1399#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
1400#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
1401#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
1402#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
1403#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
1404
1405/* Source 1 selectors */
1406#define MATH_SRC1_SHIFT 12
1407#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
1408#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
1409#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
1410#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
1411#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
1412#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
1413#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
1414#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
1415#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
1416
1417/* Destination selectors */
1418#define MATH_DEST_SHIFT 8
1419#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
1420#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
1421#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
1422#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
1423#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
1424#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
1425#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
1426#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
1427#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
1428#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
1429
1430/* Length selectors */
1431#define MATH_LEN_SHIFT 0
1432#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
1433#define MATH_LEN_1BYTE 0x01
1434#define MATH_LEN_2BYTE 0x02
1435#define MATH_LEN_4BYTE 0x04
1436#define MATH_LEN_8BYTE 0x08
1437
1438/*
1439 * JUMP Command Constructs
1440 */
1441
1442#define JUMP_CLASS_SHIFT 25
1443#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
1444#define JUMP_CLASS_NONE 0
1445#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
1446#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
1447#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
1448
1449#define JUMP_JSL_SHIFT 24
1450#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
1451#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
1452
1453#define JUMP_TYPE_SHIFT 22
1454#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
1455#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
1456#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
1457#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
1458#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
1459
1460#define JUMP_TEST_SHIFT 16
1461#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
1462#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
1463#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
1464#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
1465#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
1466
1467/* Condition codes. JSL bit is factored in */
1468#define JUMP_COND_SHIFT 8
1469#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
1470#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
1471#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
1472#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
1473#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
1474#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
1475#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
1476#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
1477
1478#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
1479#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
1480#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
1481#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
1482#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
1483#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
1484#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
1485#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
1486
1487#define JUMP_OFFSET_SHIFT 0
1488#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
1489
1490/*
1491 * NFIFO ENTRY
1492 * Data Constructs
1493 *
1494 */
1495#define NFIFOENTRY_DEST_SHIFT 30
1496#define NFIFOENTRY_DEST_MASK (3 << NFIFOENTRY_DEST_SHIFT)
1497#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
1498#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
1499#define NFIFOENTRY_DEST_CLASS2 (2 << NFIFOENTRY_DEST_SHIFT)
1500#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT)
1501
1502#define NFIFOENTRY_LC2_SHIFT 29
1503#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
1504#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
1505
1506#define NFIFOENTRY_LC1_SHIFT 28
1507#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
1508#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
1509
1510#define NFIFOENTRY_FC2_SHIFT 27
1511#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
1512#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
1513
1514#define NFIFOENTRY_FC1_SHIFT 26
1515#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
1516#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
1517
1518#define NFIFOENTRY_STYPE_SHIFT 24
1519#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
1520#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
1521#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
1522#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
1523#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
1524
1525#define NFIFOENTRY_DTYPE_SHIFT 20
1526#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
1527
1528#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
1529#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
1530#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
1531#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
1532#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
1533#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
1534#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
1535
1536#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
1537#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
1538#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
1539#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
1540#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
1541#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
1542#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
1543#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
1544#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
1545#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
1546#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
1547#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
1548
1549
1550#define NFIFOENTRY_BND_SHIFT 19
1551#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
1552#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
1553
1554#define NFIFOENTRY_PTYPE_SHIFT 16
1555#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
1556
1557#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
1558#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
1559#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
1560#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
1561#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
1562#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
1563#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
1564#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
1565
1566#define NFIFOENTRY_OC_SHIFT 15
1567#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
1568#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
1569
1570#define NFIFOENTRY_AST_SHIFT 14
1571#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT)
1572#define NFIFOENTRY_AST (1 << NFIFOENTRY_AST_SHIFT)
1573
1574#define NFIFOENTRY_BM_SHIFT 11
1575#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
1576#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
1577
1578#define NFIFOENTRY_PS_SHIFT 10
1579#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
1580#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
1581
1582
1583#define NFIFOENTRY_DLEN_SHIFT 0
1584#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
1585
1586#define NFIFOENTRY_PLEN_SHIFT 0
1587#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
1588
1589/*
1590 * PDB internal definitions
1591 */
1592
1593/* IPSec ESP CBC Encap/Decap Options */
1594#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
1595#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
1596#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
1597#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
1598#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
1599#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
1600#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
1601#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
1602#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
1603#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
1604
1605#endif /* DESC_H */
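
The OP_ALG_* fields above are OR-ed together into a single OPERATION command word. As a minimal sketch (assuming CMD_OPERATION, which is defined earlier in desc.h outside the hunks shown here), a one-shot CBC-AES encrypt operation might be encoded as:

	/* Illustrative only, not part of the patch */
	u32 op = CMD_OPERATION |
		 (OP_ALG_TYPE_CLASS1 << OP_ALG_TYPE_SHIFT) |
		 OP_ALG_ALGSEL_AES |	/* algorithm selector */
		 OP_ALG_AAI_CBC |	/* additional algorithm info: mode */
		 OP_ALG_AS_INITFINAL |	/* algorithm state: init + finalize */
		 OP_ALG_ENCRYPT;	/* direction */
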
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
new file mode 100644
index 000000000000..46915800c26f
--- /dev/null
+++ b/drivers/crypto/caam/desc_constr.h
@@ -0,0 +1,205 @@
1/*
2 * caam descriptor construction helper functions
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 */
6
7#include "desc.h"
8
9#define IMMEDIATE (1 << 23)
10#define CAAM_CMD_SZ sizeof(u32)
11#define CAAM_PTR_SZ sizeof(dma_addr_t)
12#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)
13
14#ifdef DEBUG
15#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
16 &__func__[sizeof("append")]); } while (0)
17#else
18#define PRINT_POS
19#endif
20
21#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
22 LDST_SRCDST_WORD_DECOCTRL | \
23 (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
24#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
25 LDST_SRCDST_WORD_DECOCTRL | \
26 (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
27
28static inline int desc_len(u32 *desc)
29{
30 return *desc & HDR_DESCLEN_MASK;
31}
32
33static inline int desc_bytes(void *desc)
34{
35 return desc_len(desc) * CAAM_CMD_SZ;
36}
37
38static inline u32 *desc_end(u32 *desc)
39{
40 return desc + desc_len(desc);
41}
42
43static inline void *sh_desc_pdb(u32 *desc)
44{
45 return desc + 1;
46}
47
48static inline void init_desc(u32 *desc, u32 options)
49{
50 *desc = options | HDR_ONE | 1;
51}
52
53static inline void init_sh_desc(u32 *desc, u32 options)
54{
55 PRINT_POS;
56 init_desc(desc, CMD_SHARED_DESC_HDR | options);
57}
58
59static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
60{
61 u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1;
62
63 init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) |
64 options);
65}
66
67static inline void init_job_desc(u32 *desc, u32 options)
68{
69 init_desc(desc, CMD_DESC_HDR | options);
70}
71
72static inline void append_ptr(u32 *desc, dma_addr_t ptr)
73{
74 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
75
76 *offset = ptr;
77
78 (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
79}
80
81static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
82 u32 options)
83{
84 PRINT_POS;
85 init_job_desc(desc, HDR_SHARED | options |
86 (len << HDR_START_IDX_SHIFT));
87 append_ptr(desc, ptr);
88}
89
90static inline void append_data(u32 *desc, void *data, int len)
91{
92 u32 *offset = desc_end(desc);
93
94 if (len) /* avoid sparse warning: memcpy with byte count of 0 */
95 memcpy(offset, data, len);
96
97 (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
98}
99
100static inline void append_cmd(u32 *desc, u32 command)
101{
102 u32 *cmd = desc_end(desc);
103
104 *cmd = command;
105
106 (*desc)++;
107}
108
109static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
110 u32 command)
111{
112 append_cmd(desc, command | len);
113 append_ptr(desc, ptr);
114}
115
116static inline void append_cmd_data(u32 *desc, void *data, int len,
117 u32 command)
118{
119 append_cmd(desc, command | IMMEDIATE | len);
120 append_data(desc, data, len);
121}
122
123static inline u32 *append_jump(u32 *desc, u32 options)
124{
125 u32 *cmd = desc_end(desc);
126
127 PRINT_POS;
128 append_cmd(desc, CMD_JUMP | options);
129
130 return cmd;
131}
132
133static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
134{
135 *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
136}
137
138#define APPEND_CMD(cmd, op) \
139static inline void append_##cmd(u32 *desc, u32 options) \
140{ \
141 PRINT_POS; \
142 append_cmd(desc, CMD_##op | options); \
143}
144APPEND_CMD(operation, OPERATION)
145APPEND_CMD(move, MOVE)
146
147#define APPEND_CMD_LEN(cmd, op) \
148static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
149{ \
150 PRINT_POS; \
151 append_cmd(desc, CMD_##op | len | options); \
152}
153APPEND_CMD_LEN(seq_store, SEQ_STORE)
154APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
155APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
156
157#define APPEND_CMD_PTR(cmd, op) \
158static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
159 u32 options) \
160{ \
161 PRINT_POS; \
162 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
163}
164APPEND_CMD_PTR(key, KEY)
165APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
166APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
167APPEND_CMD_PTR(load, LOAD)
168APPEND_CMD_PTR(store, STORE)
169APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
170APPEND_CMD_PTR(fifo_store, FIFO_STORE)
171
172#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
173static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
174 unsigned int len, u32 options) \
175{ \
176 PRINT_POS; \
177 append_cmd_data(desc, data, len, CMD_##op | options); \
178}
179APPEND_CMD_PTR_TO_IMM(load, LOAD);
180APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
181
182/*
183 * 2nd variant for commands whose specified immediate length differs
184 * from length of immediate data provided, e.g., split keys
185 */
186#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
187static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
188 unsigned int data_len, \
189 unsigned int len, u32 options) \
190{ \
191 PRINT_POS; \
192 append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
193 append_data(desc, data, data_len); \
194}
195APPEND_CMD_PTR_TO_IMM2(key, KEY);
196
197#define APPEND_CMD_RAW_IMM(cmd, op, type) \
198static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
199 u32 options) \
200{ \
201 PRINT_POS; \
202 append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
203 append_cmd(desc, immediate); \
204}
205APPEND_CMD_RAW_IMM(load, LOAD, u32);
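
Taken together, these helpers grow a descriptor in place, with the running length tracked in the header word. A minimal sketch of a hypothetical caller follows; key_dma, src_dma, dst_dma, keylen and datalen are placeholders, and CLASS_1, KEY_DEST_CLASS_REG, OP_TYPE_CLASS1_ALG and the FIFOLD_*/FIFOST_* flags come from desc.h, outside the hunks shown:

	u32 desc[CAAM_DESC_BYTES_MAX / CAAM_CMD_SZ];

	init_job_desc(desc, 0);		/* bare header, desc_len == 1 */
	append_key(desc, key_dma, keylen, CLASS_1 | KEY_DEST_CLASS_REG);
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			 OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);
	append_seq_in_ptr(desc, src_dma, datalen, 0);
	append_seq_fifo_load(desc, datalen, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_out_ptr(desc, dst_dma, datalen, 0);
	append_seq_fifo_store(desc, datalen, FIFOST_TYPE_MESSAGE_DATA);
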
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
new file mode 100644
index 000000000000..7e2d54bffad6
--- /dev/null
+++ b/drivers/crypto/caam/error.c
@@ -0,0 +1,248 @@
1/*
2 * CAAM Error Reporting
3 *
4 * Copyright 2009-2011 Freescale Semiconductor, Inc.
5 */
6
7#include "compat.h"
8#include "regs.h"
9#include "intern.h"
10#include "desc.h"
11#include "jr.h"
12#include "error.h"
13
14#define SPRINTFCAT(str, format, param, max_alloc) \
15{ \
16 char *tmp; \
17 \
18 tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
19 sprintf(tmp, format, param); \
20 strcat(str, tmp); \
21 kfree(tmp); \
22}
23
24static void report_jump_idx(u32 status, char *outstr)
25{
26 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
27 JRSTA_DECOERR_INDEX_SHIFT;
28
29 if (status & JRSTA_DECOERR_JUMP)
30 strcat(outstr, "jump tgt desc idx ");
31 else
32 strcat(outstr, "desc idx ");
33
34 SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
35}
36
37static void report_ccb_status(u32 status, char *outstr)
38{
39 char *cha_id_list[] = {
40 "",
41 "AES",
42 "DES, 3DES",
43 "ARC4",
44 "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512",
45 "RNG",
46 "SNOW f8",
47 "Kasumi f8, f9",
48 "All Public Key Algorithms",
49 "CRC",
50 "SNOW f9",
51 };
52 char *err_id_list[] = {
53 "None. No error.",
54 "Mode error.",
55 "Data size error.",
56 "Key size error.",
57 "PKHA A memory size error.",
58 "PKHA B memory size error.",
59 "Data arrived out of sequence error.",
60 "PKHA divide-by-zero error.",
61 "PKHA modulus even error.",
62 "DES key parity error.",
63 "ICV check failed.",
64 "Hardware error.",
65 "Unsupported CCM AAD size.",
66 "Class 1 CHA is not reset",
67 "Invalid CHA combination was selected",
68 "Invalid CHA selected.",
69 };
70 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
71 JRSTA_CCBERR_CHAID_SHIFT;
72 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
73
74 report_jump_idx(status, outstr);
75
76 if (cha_id < ARRAY_SIZE(cha_id_list)) {
77 SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id],
78 strlen(cha_id_list[cha_id]));
79 } else {
80 SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
81 cha_id, sizeof("ff"));
82 }
83
84 if (err_id < ARRAY_SIZE(err_id_list)) {
85 SPRINTFCAT(outstr, "%s", err_id_list[err_id],
86 strlen(err_id_list[err_id]));
87 } else {
88 SPRINTFCAT(outstr, "unidentified err_id value 0x%02x",
89 err_id, sizeof("ff"));
90 }
91}
92
93static void report_jump_status(u32 status, char *outstr)
94{
95 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
96}
97
98static void report_deco_status(u32 status, char *outstr)
99{
100 const struct {
101 u8 value;
102 char *error_text;
103 } desc_error_list[] = {
104 { 0x00, "None. No error." },
105 { 0x01, "SGT Length Error. The descriptor is trying to read "
106 "more data than is contained in the SGT table." },
107 { 0x02, "Reserved." },
108 { 0x03, "Job Ring Control Error. There is a bad value in the "
109 "Job Ring Control register." },
110 { 0x04, "Invalid Descriptor Command. The Descriptor Command "
111 "field is invalid." },
112 { 0x05, "Reserved." },
113 { 0x06, "Invalid KEY Command" },
114 { 0x07, "Invalid LOAD Command" },
115 { 0x08, "Invalid STORE Command" },
116 { 0x09, "Invalid OPERATION Command" },
117 { 0x0A, "Invalid FIFO LOAD Command" },
118 { 0x0B, "Invalid FIFO STORE Command" },
119 { 0x0C, "Invalid MOVE Command" },
120 { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
121 "invalid because the target is not a Job Header "
122 "Command, or the jump is from a Trusted Descriptor to "
123 "a Job Descriptor, or because the target Descriptor "
124 "contains a Shared Descriptor." },
125 { 0x0E, "Invalid MATH Command" },
126 { 0x0F, "Invalid SIGNATURE Command" },
127 { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
128 "Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
129 "LOAD, or SEQ FIFO STORE decremented the input or "
130 "output sequence length below 0. This error may result "
131 "if a built-in PROTOCOL Command has encountered a "
132 "malformed PDU." },
133 { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
134 { 0x12, "Shared Descriptor Header Error" },
135 { 0x13, "Header Error. Invalid length or parity, or certain "
136 "other problems." },
137 { 0x14, "Burster Error. Burster has gotten to an illegal "
138 "state" },
139 { 0x15, "Context Register Length Error. The descriptor is "
140 "trying to read or write past the end of the Context "
141 "Register. A SEQ LOAD or SEQ STORE with the VLF bit "
142 "set was executed with too large a length in the "
143 "variable length register (VSOL for SEQ STORE or VSIL "
144 "for SEQ LOAD)." },
145 { 0x16, "DMA Error" },
146 { 0x17, "Reserved." },
147 { 0x1A, "Job failed due to JR reset" },
148 { 0x1B, "Job failed due to Fail Mode" },
149 { 0x1C, "DECO Watchdog timer timeout error" },
150 { 0x1D, "DECO tried to copy a key from another DECO but the "
151 "other DECO's Key Registers were locked" },
152 { 0x1E, "DECO attempted to copy data from a DECO that had an "
153 "unmasked Descriptor error" },
154 { 0x1F, "LIODN error. DECO was trying to share from itself or "
155 "from another DECO but the two Non-SEQ LIODN values "
156 "didn't match or the 'shared from' DECO's Descriptor "
157 "required that the SEQ LIODNs be the same and they "
158 "aren't." },
159 { 0x20, "DECO has completed a reset initiated via the DRR "
160 "register" },
161 { 0x21, "Nonce error. When using EKT (CCM) key encryption "
162 "option in the FIFO STORE Command, the Nonce counter "
163 "reached its maximum value and this encryption mode "
164 "can no longer be used." },
165 { 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
166 "(input frame; block ciphers) and IPsec decap (output "
167 "frame, when doing the next header byte update) and "
168 "DCRC (output frame)." },
169 { 0x80, "DNR (do not run) error" },
170 { 0x81, "undefined protocol command" },
171 { 0x82, "invalid setting in PDB" },
172 { 0x83, "Anti-replay LATE error" },
173 { 0x84, "Anti-replay REPLAY error" },
174 { 0x85, "Sequence number overflow" },
175 { 0x86, "Sigver invalid signature" },
176 { 0x87, "DSA Sign Illegal test descriptor" },
177 { 0x88, "Protocol Format Error - A protocol has seen an error "
178 "in the format of data received. When running RSA, "
179 "this means that formatting with random padding was "
180 "used, and did not follow the form: 0x00, 0x02, 8-to-N "
181 "bytes of non-zero pad, 0x00, F data." },
182 { 0x89, "Protocol Size Error - A protocol has seen an error in "
183 "size. When running RSA, pdb size N < (size of F) when "
184 "no formatting is used; or pdb size N < (F + 11) when "
185 "formatting is used." },
186 { 0xC1, "Blob Command error: Undefined mode" },
187 { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
188 { 0xC4, "Blob Command error: Black Blob key or input size "
189 "error" },
190 { 0xC5, "Blob Command error: Invalid key destination" },
191 { 0xC8, "Blob Command error: Trusted/Secure mode error" },
192 { 0xF0, "IPsec TTL or hop limit field either came in as 0, "
193 "or was decremented to 0" },
194 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
195 };
196 u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
197 int i;
198
199 report_jump_idx(status, outstr);
200
201 for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
202 if (desc_error_list[i].value == desc_error)
203 break;
204
205 if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) {
206 SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text,
207 strlen(desc_error_list[i].error_text));
208 } else {
209 SPRINTFCAT(outstr, "unidentified error value 0x%02x",
210 desc_error, sizeof("ff"));
211 }
212}
213
214static void report_jr_status(u32 status, char *outstr)
215{
216 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
217}
218
219static void report_cond_code_status(u32 status, char *outstr)
220{
221 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
222}
223
224char *caam_jr_strstatus(char *outstr, u32 status)
225{
226 struct stat_src {
227 void (*report_ssed)(u32 status, char *outstr);
228 char *error;
229 } status_src[] = {
230 { NULL, "No error" },
231 { NULL, NULL },
232 { report_ccb_status, "CCB" },
233 { report_jump_status, "Jump" },
234 { report_deco_status, "DECO" },
235 { NULL, NULL },
236 { report_jr_status, "Job Ring" },
237 { report_cond_code_status, "Condition Code" },
238 };
239 u32 ssrc = status >> JRSTA_SSRC_SHIFT;
240
241 sprintf(outstr, "%s: ", status_src[ssrc].error);
242
243 if (status_src[ssrc].report_ssed)
244 status_src[ssrc].report_ssed(status, outstr);
245
246 return outstr;
247}
248EXPORT_SYMBOL(caam_jr_strstatus);
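
A note on usage: caam_jr_strstatus() is meant to be called from a job
completion callback to turn a raw JRSTA status word into readable text.
A minimal sketch, assuming a caller-provided buffer sized by the
CAAM_ERROR_STR_MAX constant from error.h below (my_done() is a
hypothetical callback name, not part of this patch):

	/* Hedged sketch: decode a nonzero CAAM job status in a
	 * completion callback. Only caam_jr_strstatus() and
	 * CAAM_ERROR_STR_MAX come from this patch.
	 */
	static void my_done(struct device *dev, u32 *desc, u32 status,
			    void *arg)
	{
		char outstr[CAAM_ERROR_STR_MAX];

		if (status)
			dev_err(dev, "job error: %s\n",
				caam_jr_strstatus(outstr, status));
	}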
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
new file mode 100644
index 000000000000..02c7baa1748e
--- /dev/null
+++ b/drivers/crypto/caam/error.h
@@ -0,0 +1,11 @@
1/*
2 * CAAM Error Reporting code header
3 *
4 * Copyright 2009-2011 Freescale Semiconductor, Inc.
5 */
6
7#ifndef CAAM_ERROR_H
8#define CAAM_ERROR_H
9#define CAAM_ERROR_STR_MAX 302
10extern char *caam_jr_strstatus(char *outstr, u32 status);
11#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
new file mode 100644
index 000000000000..a34be01b0b29
--- /dev/null
+++ b/drivers/crypto/caam/intern.h
@@ -0,0 +1,113 @@
1/*
2 * CAAM/SEC 4.x driver backend
3 * Private/internal definitions between modules
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 *
7 */
8
9#ifndef INTERN_H
10#define INTERN_H
11
12#define JOBR_UNASSIGNED 0
13#define JOBR_ASSIGNED 1
14
15/* Currently comes from a Kconfig param as a power of 2 (driver-required) */
16#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
17
18/* Kconfig params for interrupt coalescing if selected (else zero) */
19#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
20#define JOBR_INTC JRCFG_ICEN
21#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
22#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
23#else
24#define JOBR_INTC 0
25#define JOBR_INTC_TIME_THLD 0
26#define JOBR_INTC_COUNT_THLD 0
27#endif
28
29/*
30 * Storage for tracking each in-process entry moving across a ring
31 * Each entry on an output ring needs one of these
32 */
33struct caam_jrentry_info {
34 void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
35 void *cbkarg; /* Argument per ring entry */
36 u32 *desc_addr_virt; /* Stored virt addr for postprocessing */
37 dma_addr_t desc_addr_dma; /* Stored bus addr for done matching */
38 u32 desc_size; /* Stored size for postprocessing, header derived */
39};
40
41/* Private sub-storage for a single JobR */
42struct caam_drv_private_jr {
43 struct device *parentdev; /* points back to controller dev */
44 int ridx;
45 struct caam_job_ring __iomem *rregs; /* JobR's register space */
46 struct tasklet_struct irqtask[NR_CPUS];
47 int irq; /* One per queue */
48 int assign; /* busy/free */
49
50 /* Job ring info */
51 int ringsize; /* Size of rings (assume input = output) */
52 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
53 spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
54 int inp_ring_write_index; /* Input index "tail" */
55 int head; /* entinfo (s/w ring) head index */
56 dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
57 spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
58 int out_ring_read_index; /* Output index "tail" */
59 int tail; /* entinfo (s/w ring) tail index */
60 struct jr_outentry *outring; /* Base of output ring, DMA-safe */
61};
62
63/*
64 * Driver-private storage for a single CAAM block instance
65 */
66struct caam_drv_private {
67
68 struct device *dev;
69 struct device **jrdev; /* Alloc'ed array per sub-device */
70 spinlock_t jr_alloc_lock;
71 struct platform_device *pdev;
72
73 /* Physical-presence section */
74 struct caam_ctrl *ctrl; /* controller region */
75 struct caam_deco **deco; /* DECO/CCB views */
76 struct caam_assurance *ac;
77 struct caam_queue_if *qi; /* QI control region */
78
79 /*
80 * Detected geometry block. Filled in from device tree if powerpc,
81 * or from register-based version detection code
82 */
83 u8 total_jobrs; /* Total Job Rings in device */
84 u8 qi_present; /* Nonzero if QI present in device */
85 int secvio_irq; /* Security violation interrupt number */
86
87 /* which jr allocated to scatterlist crypto */
88 atomic_t tfm_count ____cacheline_aligned;
89 int num_jrs_for_algapi;
90 struct device **algapi_jr;
91 /* list of registered crypto algorithms (mk generic context handle?) */
92 struct list_head alg_list;
93
94 /*
95 * debugfs entries for developer view into driver/device
96 * variables at runtime.
97 */
98#ifdef CONFIG_DEBUG_FS
99 struct dentry *dfs_root;
100 struct dentry *ctl; /* controller dir */
101 struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
102 struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
103 struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
104 struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
105
106 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
107 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
108#endif
109};
110
111void caam_jr_algapi_init(struct device *dev);
112void caam_jr_algapi_remove(struct device *dev);
113#endif /* INTERN_H */
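
Since JOBR_DEPTH is constrained to a power of two, the ring code in jr.c
below wraps every index with a mask instead of a modulo. A minimal sketch
of the idiom, with an illustrative DEPTH standing in for JOBR_DEPTH:

	/* Sketch: power-of-two ring index arithmetic (illustrative only) */
	#define DEPTH 16			/* must be 2^n, like JOBR_DEPTH */

	static inline int ring_next(int idx)
	{
		return (idx + 1) & (DEPTH - 1);	/* wraps with no divide */
	}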
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
new file mode 100644
index 000000000000..340fa322c0f0
--- /dev/null
+++ b/drivers/crypto/caam/jr.c
@@ -0,0 +1,517 @@
1/*
2 * CAAM/SEC 4.x transport/backend driver
3 * JobR backend functionality
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#include "compat.h"
9#include "regs.h"
10#include "jr.h"
11#include "desc.h"
12#include "intern.h"
13
14/* Main per-ring interrupt handler */
15static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
16{
17 struct device *dev = st_dev;
18 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
19 u32 irqstate;
20
21 /*
22 * Check the output ring for ready responses, kick
23 * tasklet if jobs done.
24 */
25 irqstate = rd_reg32(&jrp->rregs->jrintstatus);
26 if (!irqstate)
27 return IRQ_NONE;
28
29	/*
30	 * If JobR error, we have more development work to do.
31	 * Flag a bug now, but we really need to shut down and
32	 * restart the queue (and fix the code).
33	 */
34 if (irqstate & JRINT_JR_ERROR) {
35 dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
36 BUG();
37 }
38
39 /* mask valid interrupts */
40 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
41
42 /* Have valid interrupt at this point, just ACK and trigger */
43 wr_reg32(&jrp->rregs->jrintstatus, irqstate);
44
45 preempt_disable();
46 tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
47 preempt_enable();
48
49 return IRQ_HANDLED;
50}
51
52/* Deferred service handler, run as interrupt-fired tasklet */
53static void caam_jr_dequeue(unsigned long devarg)
54{
55 int hw_idx, sw_idx, i, head, tail;
56 struct device *dev = (struct device *)devarg;
57 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
58 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
59 u32 *userdesc, userstatus;
60 void *userarg;
61 unsigned long flags;
62
63 spin_lock_irqsave(&jrp->outlock, flags);
64
65 head = ACCESS_ONCE(jrp->head);
66 sw_idx = tail = jrp->tail;
67
68 while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
69 rd_reg32(&jrp->rregs->outring_used)) {
70
71 hw_idx = jrp->out_ring_read_index;
72 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
73 sw_idx = (tail + i) & (JOBR_DEPTH - 1);
74
75 smp_read_barrier_depends();
76
77 if (jrp->outring[hw_idx].desc ==
78 jrp->entinfo[sw_idx].desc_addr_dma)
79 break; /* found */
80 }
81 /* we should never fail to find a matching descriptor */
82 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
83
84 /* Unmap just-run descriptor so we can post-process */
85 dma_unmap_single(dev, jrp->outring[hw_idx].desc,
86 jrp->entinfo[sw_idx].desc_size,
87 DMA_TO_DEVICE);
88
89 /* mark completed, avoid matching on a recycled desc addr */
90 jrp->entinfo[sw_idx].desc_addr_dma = 0;
91
92 /* Stash callback params for use outside of lock */
93 usercall = jrp->entinfo[sw_idx].callbk;
94 userarg = jrp->entinfo[sw_idx].cbkarg;
95 userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
96 userstatus = jrp->outring[hw_idx].jrstatus;
97
98 smp_mb();
99
100 jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
101 (JOBR_DEPTH - 1);
102
103 /*
104 * if this job completed out-of-order, do not increment
105 * the tail. Otherwise, increment tail by 1 plus the
106 * number of subsequent jobs already completed out-of-order
107 */
108 if (sw_idx == tail) {
109 do {
110 tail = (tail + 1) & (JOBR_DEPTH - 1);
111 smp_read_barrier_depends();
112 } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
113 jrp->entinfo[tail].desc_addr_dma == 0);
114
115 jrp->tail = tail;
116 }
117
118 /* set done */
119 wr_reg32(&jrp->rregs->outring_rmvd, 1);
120
121 spin_unlock_irqrestore(&jrp->outlock, flags);
122
123 /* Finally, execute user's callback */
124 usercall(dev, userdesc, userstatus, userarg);
125
126 spin_lock_irqsave(&jrp->outlock, flags);
127
128 head = ACCESS_ONCE(jrp->head);
129 sw_idx = tail = jrp->tail;
130 }
131
132 spin_unlock_irqrestore(&jrp->outlock, flags);
133
134 /* reenable / unmask IRQs */
135 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
136}
137
138/**
139 * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
140 * the ordinal of the ring allocated, else returns -ENODEV if no rings
141 * are available.
142 * @ctrldev: points to the controller level dev (parent) that
143 *        owns rings available for use.
144 * @rdev:  points to where a pointer to the newly allocated queue's
145 *        dev can be written if successful.
146 **/
147int caam_jr_register(struct device *ctrldev, struct device **rdev)
148{
149 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
150 struct caam_drv_private_jr *jrpriv = NULL;
151 unsigned long flags;
152 int ring;
153
154 /* Lock, if free ring - assign, unlock */
155 spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
156 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
157 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
158 if (jrpriv->assign == JOBR_UNASSIGNED) {
159 jrpriv->assign = JOBR_ASSIGNED;
160 *rdev = ctrlpriv->jrdev[ring];
161 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
162 return ring;
163 }
164 }
165
166	/* No unassigned rings were found; tell the caller */
167 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
168 *rdev = NULL;
169
170 return -ENODEV;
171}
172EXPORT_SYMBOL(caam_jr_register);
173
174/**
175 * caam_jr_deregister() - Deregister an API and release the queue.
176 * Returns 0 if OK, -EBUSY if the queue still contains pending entries
177 * or unprocessed results at the time of the call.
178 * @rdev: points to the dev that identifies the queue to
179 *       be released.
180 **/
181int caam_jr_deregister(struct device *rdev)
182{
183 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
184 struct caam_drv_private *ctrlpriv;
185 unsigned long flags;
186
187 /* Get the owning controller's private space */
188 ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
189
190 /*
191	 * Make sure the ring is empty before releasing it
192 */
193 if (rd_reg32(&jrpriv->rregs->outring_used) ||
194 (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
195 return -EBUSY;
196
197 /* Release ring */
198 spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
199 jrpriv->assign = JOBR_UNASSIGNED;
200 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
201
202 return 0;
203}
204EXPORT_SYMBOL(caam_jr_deregister);
205
206/**
207 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
208 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
209 * descriptor.
210 * @dev:  device of the job ring to be used. This device should have
211 *        been assigned prior by caam_jr_register().
212 * @desc: points to a job descriptor that executes our request. All
213 *        descriptors (and all referenced data) must be in a DMAable
214 *        region, and all data references must be physical addresses
215 *        accessible to CAAM (i.e. within a PAMU window granted
216 *        to it).
217 * @cbk:  pointer to a callback function to be invoked upon completion
218 *        of this request. This has the form:
219 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
220 *        where:
221 *        @dev:    contains the job ring device that processed this
222 *                 response.
223 *        @desc:   descriptor that initiated the request, same as
224 *                 the "desc" passed to caam_jr_enqueue().
225 *        @status: untranslated status received from CAAM. See the
226 *                 reference manual for a detailed description of
227 *                 error meanings, or see the JRSTA definitions in
228 *                 the register header file.
229 * @areq: optional pointer to a user argument passed with the
230 *        original request; it is handed back to the callback
231 *        as its "arg" parameter, untouched, at completion
232 *        time.
233 **/
234int caam_jr_enqueue(struct device *dev, u32 *desc,
235 void (*cbk)(struct device *dev, u32 *desc,
236 u32 status, void *areq),
237 void *areq)
238{
239 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
240 struct caam_jrentry_info *head_entry;
241 unsigned long flags;
242 int head, tail, desc_size;
243 dma_addr_t desc_dma;
244
245 desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
246 desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
247 if (dma_mapping_error(dev, desc_dma)) {
248 dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
249 return -EIO;
250 }
251
252 spin_lock_irqsave(&jrp->inplock, flags);
253
254 head = jrp->head;
255 tail = ACCESS_ONCE(jrp->tail);
256
257 if (!rd_reg32(&jrp->rregs->inpring_avail) ||
258 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
259 spin_unlock_irqrestore(&jrp->inplock, flags);
260 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
261 return -EBUSY;
262 }
263
264 head_entry = &jrp->entinfo[head];
265 head_entry->desc_addr_virt = desc;
266 head_entry->desc_size = desc_size;
267 head_entry->callbk = (void *)cbk;
268 head_entry->cbkarg = areq;
269 head_entry->desc_addr_dma = desc_dma;
270
271 jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
272
273 smp_wmb();
274
275 jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
276 (JOBR_DEPTH - 1);
277 jrp->head = (head + 1) & (JOBR_DEPTH - 1);
278
279 wmb();
280
281 wr_reg32(&jrp->rregs->inpring_jobadd, 1);
282
283 spin_unlock_irqrestore(&jrp->inplock, flags);
284
285 return 0;
286}
287EXPORT_SYMBOL(caam_jr_enqueue);
288
289static int caam_reset_hw_jr(struct device *dev)
290{
291 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
292 unsigned int timeout = 100000;
293
294 /*
295 * mask interrupts since we are going to poll
296 * for reset completion status
297 */
298 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
299
300 /* initiate flush (required prior to reset) */
301 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
302 while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
303 JRINT_ERR_HALT_INPROGRESS) && --timeout)
304 cpu_relax();
305
306 if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
307 JRINT_ERR_HALT_COMPLETE || timeout == 0) {
308 dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
309 return -EIO;
310 }
311
312 /* initiate reset */
313 timeout = 100000;
314 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
315 while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
316 cpu_relax();
317
318 if (timeout == 0) {
319 dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
320 return -EIO;
321 }
322
323 /* unmask interrupts */
324 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
325
326 return 0;
327}
328
329/*
330 * Init JobR independent of platform property detection
331 */
332static int caam_jr_init(struct device *dev)
333{
334 struct caam_drv_private_jr *jrp;
335 dma_addr_t inpbusaddr, outbusaddr;
336 int i, error;
337
338 jrp = dev_get_drvdata(dev);
339
340 /* Connect job ring interrupt handler. */
341 for_each_possible_cpu(i)
342 tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
343 (unsigned long)dev);
344
345 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
346 "caam-jobr", dev);
347 if (error) {
348 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
349 jrp->ridx, jrp->irq);
350 irq_dispose_mapping(jrp->irq);
351 jrp->irq = 0;
352 return -EINVAL;
353 }
354
355 error = caam_reset_hw_jr(dev);
356 if (error)
357 return error;
358
359 jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
360 GFP_KERNEL | GFP_DMA);
361 jrp->outring = kzalloc(sizeof(struct jr_outentry) *
362 JOBR_DEPTH, GFP_KERNEL | GFP_DMA);
363
364 jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
365 GFP_KERNEL);
366
367 if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
368 (jrp->entinfo == NULL)) {
369 dev_err(dev, "can't allocate job rings for %d\n",
370 jrp->ridx);
371 return -ENOMEM;
372 }
373
374 for (i = 0; i < JOBR_DEPTH; i++)
375 jrp->entinfo[i].desc_addr_dma = !0;
376
377 /* Setup rings */
378 inpbusaddr = dma_map_single(dev, jrp->inpring,
380				    sizeof(dma_addr_t) * JOBR_DEPTH,
380 DMA_BIDIRECTIONAL);
381 if (dma_mapping_error(dev, inpbusaddr)) {
382 dev_err(dev, "caam_jr_init(): can't map input ring\n");
383 kfree(jrp->inpring);
384 kfree(jrp->outring);
385 kfree(jrp->entinfo);
386 return -EIO;
387 }
388
389 outbusaddr = dma_map_single(dev, jrp->outring,
390 sizeof(struct jr_outentry) * JOBR_DEPTH,
391 DMA_BIDIRECTIONAL);
392 if (dma_mapping_error(dev, outbusaddr)) {
393 dev_err(dev, "caam_jr_init(): can't map output ring\n");
394 dma_unmap_single(dev, inpbusaddr,
395				 sizeof(dma_addr_t) * JOBR_DEPTH,
396 DMA_BIDIRECTIONAL);
397 kfree(jrp->inpring);
398 kfree(jrp->outring);
399 kfree(jrp->entinfo);
400 return -EIO;
401 }
402
403 jrp->inp_ring_write_index = 0;
404 jrp->out_ring_read_index = 0;
405 jrp->head = 0;
406 jrp->tail = 0;
407
408 wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
409 wr_reg64(&jrp->rregs->outring_base, outbusaddr);
410 wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
411 wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
412
413 jrp->ringsize = JOBR_DEPTH;
414
415 spin_lock_init(&jrp->inplock);
416 spin_lock_init(&jrp->outlock);
417
418 /* Select interrupt coalescing parameters */
419 setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
420 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
421 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
422
423 jrp->assign = JOBR_UNASSIGNED;
424 return 0;
425}
426
427/*
428 * Shutdown JobR independent of platform property code
429 */
430int caam_jr_shutdown(struct device *dev)
431{
432 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
433 dma_addr_t inpbusaddr, outbusaddr;
434 int ret, i;
435
436 ret = caam_reset_hw_jr(dev);
437
438 for_each_possible_cpu(i)
439 tasklet_kill(&jrp->irqtask[i]);
440
441 /* Release interrupt */
442 free_irq(jrp->irq, dev);
443
444 /* Free rings */
445 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
446 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
447 dma_unmap_single(dev, outbusaddr,
448 sizeof(struct jr_outentry) * JOBR_DEPTH,
449 DMA_BIDIRECTIONAL);
450	dma_unmap_single(dev, inpbusaddr, sizeof(dma_addr_t) * JOBR_DEPTH,
451 DMA_BIDIRECTIONAL);
452 kfree(jrp->outring);
453 kfree(jrp->inpring);
454 kfree(jrp->entinfo);
455
456 return ret;
457}
458
459/*
460 * Probe routine for each detected JobR subsystem. It assumes that
461 * property detection was picked up externally.
462 */
463int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
464 int ring)
465{
466 struct device *ctrldev, *jrdev;
467 struct platform_device *jr_pdev;
468 struct caam_drv_private *ctrlpriv;
469 struct caam_drv_private_jr *jrpriv;
470 u32 *jroffset;
471 int error;
472
473 ctrldev = &pdev->dev;
474 ctrlpriv = dev_get_drvdata(ctrldev);
475
476 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
477 GFP_KERNEL);
478 if (jrpriv == NULL) {
479 dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
480 ring);
481 return -ENOMEM;
482 }
483 jrpriv->parentdev = ctrldev; /* point back to parent */
484 jrpriv->ridx = ring; /* save ring identity relative to detection */
485
486 /*
487	 * Derive a pointer to the detected JobR's regs.
488	 * The driver has already iomapped the entire space; we just
489 * need to add in the offset to this JobR. Don't know if I
490 * like this long-term, but it'll run
491 */
492 jroffset = (u32 *)of_get_property(np, "reg", NULL);
493 jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
494 + *jroffset);
495
496 /* Build a local dev for each detected queue */
497 jr_pdev = of_platform_device_create(np, NULL, ctrldev);
498 if (jr_pdev == NULL) {
499 kfree(jrpriv);
500 return -EINVAL;
501 }
502 jrdev = &jr_pdev->dev;
503 dev_set_drvdata(jrdev, jrpriv);
504 ctrlpriv->jrdev[ring] = jrdev;
505
506 /* Identify the interrupt */
507 jrpriv->irq = of_irq_to_resource(np, 0, NULL);
508
509 /* Now do the platform independent part */
510 error = caam_jr_init(jrdev); /* now turn on hardware */
511 if (error) {
512 kfree(jrpriv);
513 return error;
514 }
515
516 return error;
517}
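
Putting the exported calls together: a job-ring client first claims a ring
with caam_jr_register(), then enqueues DMA-able descriptors against it. A
hedged sketch of that flow (descriptor construction elided; submit_one()
and my_done() are hypothetical names, not part of this patch):

	/* Sketch: one-shot submission using the API above. The descriptor
	 * must live in a DMA-able region; my_done() runs later from the
	 * dequeue tasklet. Error handling is abbreviated.
	 */
	static int submit_one(struct device *ctrldev, u32 *desc)
	{
		struct device *jrdev;
		int ret;

		ret = caam_jr_register(ctrldev, &jrdev);  /* claim a ring */
		if (ret < 0)
			return ret;

		ret = caam_jr_enqueue(jrdev, desc, my_done, NULL);
		if (ret)
			caam_jr_deregister(jrdev);  /* give the ring back */

		return ret;
	}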
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
new file mode 100644
index 000000000000..c23df395b622
--- /dev/null
+++ b/drivers/crypto/caam/jr.h
@@ -0,0 +1,21 @@
1/*
2 * CAAM public-level include definitions for the JobR backend
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 */
6
7#ifndef JR_H
8#define JR_H
9
10/* Prototypes for backend-level services exposed to APIs */
11int caam_jr_register(struct device *ctrldev, struct device **rdev);
12int caam_jr_deregister(struct device *rdev);
13int caam_jr_enqueue(struct device *dev, u32 *desc,
14 void (*cbk)(struct device *dev, u32 *desc, u32 status,
15 void *areq),
16 void *areq);
17
18extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
19 int ring);
20extern int caam_jr_shutdown(struct device *dev);
21#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
new file mode 100644
index 000000000000..aee394e39056
--- /dev/null
+++ b/drivers/crypto/caam/regs.h
@@ -0,0 +1,663 @@
1/*
2 * CAAM hardware register-level view
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 */
6
7#ifndef REGS_H
8#define REGS_H
9
10#include <linux/types.h>
11#include <linux/io.h>
12
13/*
14 * Architecture-specific register access methods
15 *
16 * CAAM's bus-addressable registers are 64 bits internally.
17 * They have been wired to be safely accessible on 32-bit
18 * architectures, however. Registers were organized such
19 * that (a) they can be contained in 32 bits, (b) if not, then they
20 * can be treated as two 32-bit entities, or finally (c) if they
21 * must be treated as a single 64-bit value, then this can safely
22 * be done with two 32-bit cycles.
23 *
24 * For 32-bit operations on 64-bit values, CAAM follows the same
25 * 64-bit register access conventions as its predecessors, in that
26 * writes are "triggered" by a write to the register at the numerically
27 * higher address, thus, a full 64-bit write cycle requires a write
28 * to the lower address, followed by a write to the higher address,
29 * which will latch/execute the write cycle.
30 *
31 * For example, let's assume a SW reset of CAAM through the master
32 * configuration register.
33 * - SWRST is in bit 31 of MCFG.
34 * - MCFG begins at base+0x0000.
35 * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
36 * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
37 *
38 * (and on Power, the convention is 0-31, 32-63, I know...)
39 *
40 * Assuming a 64-bit write to this MCFG to perform a software reset
41 * would then require a write of 0 to base+0x0000, followed by a
42 * write of 0x80000000 to base+0x0004, which would "execute" the
43 * reset.
44 *
45 * Of course, since MCFG 63-32 is all zero, we could cheat and simply
46 * write 0x80000000 to base+0x0004, and the reset would work fine.
47 * However, since CAAM does contain some write-and-read-intended
48 * 64-bit registers, this code defines 64-bit access methods for
49 * the sake of internal consistency and simplicity, and so that a
50 * clean transition to 64-bit is possible when it becomes necessary.
51 *
52 * There are limitations to this that the developer must recognize.
53 * 32-bit architectures cannot enforce an atomic 64-bit operation.
54 * Therefore:
55 *
56 * - On writes, since the HW is assumed to latch the cycle on the
57 * write of the higher-numeric-address word, then ordered
58 * writes work OK.
59 *
60 * - For reads, where a register contains a relevant value of more
61 *   than 32 bits, the hardware employs logic to latch the other
62 * "half" of the data until read, ensuring an accurate value.
63 * This is of particular relevance when dealing with CAAM's
64 * performance counters.
65 *
66 */
67
68#ifdef __BIG_ENDIAN
69#define wr_reg32(reg, data) out_be32(reg, data)
70#define rd_reg32(reg) in_be32(reg)
71#ifdef CONFIG_64BIT
72#define wr_reg64(reg, data) out_be64(reg, data)
73#define rd_reg64(reg) in_be64(reg)
74#endif
75#else
76#ifdef __LITTLE_ENDIAN
77#define wr_reg32(reg, data) __raw_writel(data, reg)
78#define rd_reg32(reg) __raw_readl(reg)
79#ifdef CONFIG_64BIT
80#define wr_reg64(reg, data) __raw_writeq(data, reg)
81#define rd_reg64(reg) __raw_readq(reg)
82#endif
83#endif
84#endif
85
86#ifndef CONFIG_64BIT
87static inline void wr_reg64(u64 __iomem *reg, u64 data)
88{
89 wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
90 wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull);
91}
92
93static inline u64 rd_reg64(u64 __iomem *reg)
94{
95 return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
96 ((u64)rd_reg32((u32 __iomem *)reg + 1));
97}
98#endif
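
With these accessors, the software-reset example from the comment above
collapses to a single call: on 32-bit parts, wr_reg64() writes the upper
word (all zero here) to the numerically-lower address first, then latches
the cycle by writing MCFGR_SWRESET (defined further down in this header)
into the upper address. A sketch, assuming a hypothetical mapped 64-bit
view of the MCFG pair:

	/* Sketch: 64-bit MCFG write per the latching convention above */
	static inline void caam_sw_reset(u64 __iomem *mcfg)
	{
		wr_reg64(mcfg, (u64)MCFGR_SWRESET); /* 0, then 0x80000000 */
	}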
99
100/*
101 * jr_outentry
102 * Represents each entry in a JobR output ring
103 */
104struct jr_outentry {
105	dma_addr_t desc; /* Pointer to completed descriptor */
106 u32 jrstatus; /* Status for completed descriptor */
107} __packed;
108
109/*
110 * caam_perfmon - Performance Monitor/Secure Memory Status/
111 * CAAM Global Status/Component Version IDs
112 *
113 * Spans f00-fff wherever instantiated
114 */
115
116/* Number of DECOs */
117#define CHA_NUM_DECONUM_SHIFT 56
118#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
119
120struct caam_perfmon {
121 /* Performance Monitor Registers f00-f9f */
122 u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
123 u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */
124 u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests */
125 u64 ob_enc_bytes; /* PC_OB_ENCRYPT - Outbound Bytes Encrypted */
126 u64 ob_prot_bytes; /* PC_OB_PROTECT - Outbound Bytes Protected */
127 u64 ib_dec_bytes; /* PC_IB_DECRYPT - Inbound Bytes Decrypted */
128	u64 ib_valid_bytes;	/* PC_IB_VALIDATED - Inbound Bytes Validated */
129 u64 rsvd[13];
130
131 /* CAAM Hardware Instantiation Parameters fa0-fbf */
132 u64 cha_rev; /* CRNR - CHA Revision Number */
133#define CTPR_QI_SHIFT 57
134#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT)
135 u64 comp_parms; /* CTPR - Compile Parameters Register */
136 u64 rsvd1[2];
137
138 /* CAAM Global Status fc0-fdf */
139 u64 faultaddr; /* FAR - Fault Address */
140 u32 faultliodn; /* FALR - Fault Address LIODN */
141 u32 faultdetail; /* FADR - Fault Addr Detail */
142 u32 rsvd2;
143 u32 status; /* CSTA - CAAM Status */
144 u64 rsvd3;
145
146 /* Component Instantiation Parameters fe0-fff */
147 u32 rtic_id; /* RVID - RTIC Version ID */
148 u32 ccb_id; /* CCBVID - CCB Version ID */
149 u64 cha_id; /* CHAVID - CHA Version ID */
150 u64 cha_num; /* CHANUM - CHA Number */
151 u64 caam_id; /* CAAMVID - CAAM Version ID */
152};
153
154/* LIODN programming for DMA configuration */
155#define MSTRID_LOCK_LIODN 0x80000000
156#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */
157
158#define MSTRID_LIODN_MASK 0x0fff
159struct masterid {
160 u32 liodn_ms; /* lock and make-trusted control bits */
161 u32 liodn_ls; /* LIODN for non-sequence and seq access */
162};
163
164/* Partition ID for DMA configuration */
165struct partid {
166 u32 rsvd1;
167 u32 pidr; /* partition ID, DECO */
168};
169
170/* RNG test mode (replicated twice in some configurations) */
171/* Padded out to 0x100 */
172struct rngtst {
173 u32 mode; /* RTSTMODEx - Test mode */
174 u32 rsvd1[3];
175 u32 reset; /* RTSTRESETx - Test reset control */
176 u32 rsvd2[3];
177 u32 status; /* RTSTSSTATUSx - Test status */
178 u32 rsvd3;
179 u32 errstat; /* RTSTERRSTATx - Test error status */
180 u32 rsvd4;
181 u32 errctl; /* RTSTERRCTLx - Test error control */
182 u32 rsvd5;
183 u32 entropy; /* RTSTENTROPYx - Test entropy */
184 u32 rsvd6[15];
185 u32 verifctl; /* RTSTVERIFCTLx - Test verification control */
186 u32 rsvd7;
187 u32 verifstat; /* RTSTVERIFSTATx - Test verification status */
188 u32 rsvd8;
189 u32 verifdata; /* RTSTVERIFDx - Test verification data */
190 u32 rsvd9;
191 u32 xkey; /* RTSTXKEYx - Test XKEY */
192 u32 rsvd10;
193 u32 oscctctl; /* RTSTOSCCTCTLx - Test osc. counter control */
194 u32 rsvd11;
195 u32 oscct; /* RTSTOSCCTx - Test oscillator counter */
196 u32 rsvd12;
197 u32 oscctstat; /* RTSTODCCTSTATx - Test osc counter status */
198 u32 rsvd13[2];
199 u32 ofifo[4]; /* RTSTOFIFOx - Test output FIFO */
200 u32 rsvd14[15];
201};
202
203/*
204 * caam_ctrl - basic core configuration
205 * starts base + 0x0000 padded out to 0x1000
206 */
207
208#define KEK_KEY_SIZE 8
209#define TKEK_KEY_SIZE 8
210#define TDSK_KEY_SIZE 8
211
212#define DECO_RESET 1 /* Use with DECO reset/availability regs */
213#define DECO_RESET_0 (DECO_RESET << 0)
214#define DECO_RESET_1 (DECO_RESET << 1)
215#define DECO_RESET_2 (DECO_RESET << 2)
216#define DECO_RESET_3 (DECO_RESET << 3)
217#define DECO_RESET_4 (DECO_RESET << 4)
218
219struct caam_ctrl {
220 /* Basic Configuration Section 000-01f */
221 /* Read/Writable */
222 u32 rsvd1;
223 u32 mcr; /* MCFG Master Config Register */
224 u32 rsvd2[2];
225
226 /* Bus Access Configuration Section 010-11f */
227 /* Read/Writable */
228 struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
229 u32 rsvd3[12];
230 struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
231 u32 rsvd4[7];
232 u32 deco_rq; /* DECORR - DECO Request */
233 struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
234 u32 rsvd5[22];
235
236 /* DECO Availability/Reset Section 120-3ff */
237 u32 deco_avail; /* DAR - DECO availability */
238 u32 deco_reset; /* DRR - DECO reset */
239 u32 rsvd6[182];
240
241 /* Key Encryption/Decryption Configuration 400-5ff */
242 /* Read/Writable only while in Non-secure mode */
243 u32 kek[KEK_KEY_SIZE]; /* JDKEKR - Key Encryption Key */
244 u32 tkek[TKEK_KEY_SIZE]; /* TDKEKR - Trusted Desc KEK */
245 u32 tdsk[TDSK_KEY_SIZE]; /* TDSKR - Trusted Desc Signing Key */
246 u32 rsvd7[32];
247 u64 sknonce; /* SKNR - Secure Key Nonce */
248 u32 rsvd8[70];
249
250 /* RNG Test/Verification/Debug Access 600-7ff */
251 /* (Useful in Test/Debug modes only...) */
252 struct rngtst rtst[2];
253
254 u32 rsvd9[448];
255
256 /* Performance Monitor f00-fff */
257 struct caam_perfmon perfmon;
258};
259
260/*
261 * Controller master config register defs
262 */
263#define MCFGR_SWRESET 0x80000000 /* software reset */
264#define MCFGR_WDENABLE 0x40000000 /* DECO watchdog enable */
265#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */
266#define MCFGR_DMA_RESET 0x10000000
267#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
268
269/* AXI read cache control */
270#define MCFGR_ARCACHE_SHIFT 12
271#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)
272
273/* AXI write cache control */
274#define MCFGR_AWCACHE_SHIFT 8
275#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
276
277/* AXI pipeline depth */
278#define MCFGR_AXIPIPE_SHIFT 4
279#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
280
281#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
282#define MCFGR_BURST_64 0x00000001 /* Max burst size */
283
284/*
285 * caam_job_ring - direct job ring setup
286 * 1-4 possible per instantiation, base + 1000/2000/3000/4000
287 * Padded out to 0x1000
288 */
289struct caam_job_ring {
290 /* Input ring */
291 u64 inpring_base; /* IRBAx - Input desc ring baseaddr */
292 u32 rsvd1;
293 u32 inpring_size; /* IRSx - Input ring size */
294 u32 rsvd2;
295 u32 inpring_avail; /* IRSAx - Input ring room remaining */
296 u32 rsvd3;
297 u32 inpring_jobadd; /* IRJAx - Input ring jobs added */
298
299 /* Output Ring */
300 u64 outring_base; /* ORBAx - Output status ring base addr */
301 u32 rsvd4;
302 u32 outring_size; /* ORSx - Output ring size */
303 u32 rsvd5;
304 u32 outring_rmvd; /* ORJRx - Output ring jobs removed */
305 u32 rsvd6;
306 u32 outring_used; /* ORSFx - Output ring slots full */
307
308 /* Status/Configuration */
309 u32 rsvd7;
310 u32 jroutstatus; /* JRSTAx - JobR output status */
311 u32 rsvd8;
312 u32 jrintstatus; /* JRINTx - JobR interrupt status */
313 u32 rconfig_hi; /* JRxCFG - Ring configuration */
314 u32 rconfig_lo;
315
316	/* Indices. CAAM maintains these as the "heads" of each queue */
317 u32 rsvd9;
318 u32 inp_rdidx; /* IRRIx - Input ring read index */
319 u32 rsvd10;
320 u32 out_wtidx; /* ORWIx - Output ring write index */
321
322 /* Command/control */
323 u32 rsvd11;
324 u32 jrcommand; /* JRCRx - JobR command */
325
326 u32 rsvd12[932];
327
328 /* Performance Monitor f00-fff */
329 struct caam_perfmon perfmon;
330};
331
332#define JR_RINGSIZE_MASK 0x03ff
333/*
334 * jrstatus - Job Ring Output Status
335 * All values in lo word
336 * Note: the same values are written out as status through QI
337 * in the command/status field of a frame descriptor
338 */
339#define JRSTA_SSRC_SHIFT 28
340#define JRSTA_SSRC_MASK 0xf0000000
341
342#define JRSTA_SSRC_NONE 0x00000000
343#define JRSTA_SSRC_CCB_ERROR 0x20000000
344#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
345#define JRSTA_SSRC_DECO 0x40000000
346#define JRSTA_SSRC_JRERROR 0x60000000
347#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000
348
349#define JRSTA_DECOERR_JUMP 0x08000000
350#define JRSTA_DECOERR_INDEX_SHIFT 8
351#define JRSTA_DECOERR_INDEX_MASK 0xff00
352#define JRSTA_DECOERR_ERROR_MASK 0x00ff
353
354#define JRSTA_DECOERR_NONE 0x00
355#define JRSTA_DECOERR_LINKLEN 0x01
356#define JRSTA_DECOERR_LINKPTR 0x02
357#define JRSTA_DECOERR_JRCTRL 0x03
358#define JRSTA_DECOERR_DESCCMD 0x04
359#define JRSTA_DECOERR_ORDER 0x05
360#define JRSTA_DECOERR_KEYCMD 0x06
361#define JRSTA_DECOERR_LOADCMD 0x07
362#define JRSTA_DECOERR_STORECMD 0x08
363#define JRSTA_DECOERR_OPCMD 0x09
364#define JRSTA_DECOERR_FIFOLDCMD 0x0a
365#define JRSTA_DECOERR_FIFOSTCMD 0x0b
366#define JRSTA_DECOERR_MOVECMD 0x0c
367#define JRSTA_DECOERR_JUMPCMD 0x0d
368#define JRSTA_DECOERR_MATHCMD 0x0e
369#define JRSTA_DECOERR_SHASHCMD 0x0f
370#define JRSTA_DECOERR_SEQCMD 0x10
371#define JRSTA_DECOERR_DECOINTERNAL 0x11
372#define JRSTA_DECOERR_SHDESCHDR 0x12
373#define JRSTA_DECOERR_HDRLEN 0x13
374#define JRSTA_DECOERR_BURSTER 0x14
375#define JRSTA_DECOERR_DESCSIGNATURE 0x15
376#define JRSTA_DECOERR_DMA 0x16
377#define JRSTA_DECOERR_BURSTFIFO 0x17
378#define JRSTA_DECOERR_JRRESET 0x1a
379#define JRSTA_DECOERR_JOBFAIL 0x1b
380#define JRSTA_DECOERR_DNRERR 0x80
381#define JRSTA_DECOERR_UNDEFPCL 0x81
382#define JRSTA_DECOERR_PDBERR 0x82
383#define JRSTA_DECOERR_ANRPLY_LATE 0x83
384#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
385#define JRSTA_DECOERR_SEQOVF 0x85
386#define JRSTA_DECOERR_INVSIGN 0x86
387#define JRSTA_DECOERR_DSASIGN 0x87
388
389#define JRSTA_CCBERR_JUMP 0x08000000
390#define JRSTA_CCBERR_INDEX_MASK 0xff00
391#define JRSTA_CCBERR_INDEX_SHIFT 8
392#define JRSTA_CCBERR_CHAID_MASK 0x00f0
393#define JRSTA_CCBERR_CHAID_SHIFT 4
394#define JRSTA_CCBERR_ERRID_MASK 0x000f
395
396#define JRSTA_CCBERR_CHAID_AES (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
397#define JRSTA_CCBERR_CHAID_DES (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
398#define JRSTA_CCBERR_CHAID_ARC4 (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
399#define JRSTA_CCBERR_CHAID_MD (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
400#define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
401#define JRSTA_CCBERR_CHAID_SNOW (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
402#define JRSTA_CCBERR_CHAID_KASUMI (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
403#define JRSTA_CCBERR_CHAID_PK (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
404#define JRSTA_CCBERR_CHAID_CRC (0x09 << JRSTA_CCBERR_CHAID_SHIFT)
405
406#define JRSTA_CCBERR_ERRID_NONE 0x00
407#define JRSTA_CCBERR_ERRID_MODE 0x01
408#define JRSTA_CCBERR_ERRID_DATASIZ 0x02
409#define JRSTA_CCBERR_ERRID_KEYSIZ 0x03
410#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
411#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
412#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
413#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
414#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
415#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
416#define JRSTA_CCBERR_ERRID_ICVCHK 0x0a
417#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
418#define JRSTA_CCBERR_ERRID_CCMAAD 0x0c
419#define JRSTA_CCBERR_ERRID_INVCHA 0x0f
420
421#define JRINT_ERR_INDEX_MASK 0x3fff0000
422#define JRINT_ERR_INDEX_SHIFT 16
423#define JRINT_ERR_TYPE_MASK 0xf00
424#define JRINT_ERR_TYPE_SHIFT 8
425#define JRINT_ERR_HALT_MASK 0xc
426#define JRINT_ERR_HALT_SHIFT 2
427#define JRINT_ERR_HALT_INPROGRESS 0x4
428#define JRINT_ERR_HALT_COMPLETE 0x8
429#define JRINT_JR_ERROR 0x02
430#define JRINT_JR_INT 0x01
431
432#define JRINT_ERR_TYPE_WRITE 1
433#define JRINT_ERR_TYPE_BAD_INPADDR 3
434#define JRINT_ERR_TYPE_BAD_OUTADDR 4
435#define JRINT_ERR_TYPE_INV_INPWRT 5
436#define JRINT_ERR_TYPE_INV_OUTWRT 6
437#define JRINT_ERR_TYPE_RESET 7
438#define JRINT_ERR_TYPE_REMOVE_OFL 8
439#define JRINT_ERR_TYPE_ADD_OFL 9
440
441#define JRCFG_SOE 0x04
442#define JRCFG_ICEN 0x02
443#define JRCFG_IMSK 0x01
444#define JRCFG_ICDCT_SHIFT 8
445#define JRCFG_ICTT_SHIFT 16
446
447#define JRCR_RESET 0x01
448
449/*
450 * caam_assurance - Assurance Controller View
451 * base + 0x6000 padded out to 0x1000
452 */
453
454struct rtic_element {
455 u64 address;
456 u32 rsvd;
457 u32 length;
458};
459
460struct rtic_block {
461 struct rtic_element element[2];
462};
463
464struct rtic_memhash {
465 u32 memhash_be[32];
466 u32 memhash_le[32];
467};
468
469struct caam_assurance {
470 /* Status/Command/Watchdog */
471 u32 rsvd1;
472 u32 status; /* RSTA - Status */
473 u32 rsvd2;
474 u32 cmd; /* RCMD - Command */
475 u32 rsvd3;
476 u32 ctrl; /* RCTL - Control */
477 u32 rsvd4;
478 u32 throttle; /* RTHR - Throttle */
479 u32 rsvd5[2];
480 u64 watchdog; /* RWDOG - Watchdog Timer */
481 u32 rsvd6;
482 u32 rend; /* REND - Endian corrections */
483 u32 rsvd7[50];
484
485 /* Block access/configuration @ 100/110/120/130 */
486 struct rtic_block memblk[4]; /* Memory Blocks A-D */
487 u32 rsvd8[32];
488
489 /* Block hashes @ 200/300/400/500 */
490 struct rtic_memhash hash[4]; /* Block hash values A-D */
491 u32 rsvd_3[640];
492};
493
494/*
495 * caam_queue_if - QI configuration and control
496 * starts base + 0x7000, padded out to 0x1000 long
497 */
498
499struct caam_queue_if {
500 u32 qi_control_hi; /* QICTL - QI Control */
501 u32 qi_control_lo;
502 u32 rsvd1;
503 u32 qi_status; /* QISTA - QI Status */
504 u32 qi_deq_cfg_hi; /* QIDQC - QI Dequeue Configuration */
505 u32 qi_deq_cfg_lo;
506 u32 qi_enq_cfg_hi; /* QISEQC - QI Enqueue Command */
507 u32 qi_enq_cfg_lo;
508 u32 rsvd2[1016];
509};
510
511/* QI control bits - low word */
512#define QICTL_DQEN 0x01 /* Enable frame pop */
513#define QICTL_STOP 0x02 /* Stop dequeue/enqueue */
514#define QICTL_SOE 0x04 /* Stop on error */
515
516/* QI control bits - high word */
517#define QICTL_MBSI 0x01
518#define QICTL_MHWSI 0x02
519#define QICTL_MWSI 0x04
520#define QICTL_MDWSI 0x08
521#define QICTL_CBSI 0x10 /* CtrlDataByteSwapInput */
522#define QICTL_CHWSI 0x20 /* CtrlDataHalfSwapInput */
523#define QICTL_CWSI 0x40 /* CtrlDataWordSwapInput */
524#define QICTL_CDWSI 0x80 /* CtrlDataDWordSwapInput */
525#define QICTL_MBSO 0x0100
526#define QICTL_MHWSO 0x0200
527#define QICTL_MWSO 0x0400
528#define QICTL_MDWSO 0x0800
529#define QICTL_CBSO 0x1000 /* CtrlDataByteSwapOutput */
530#define QICTL_CHWSO 0x2000 /* CtrlDataHalfSwapOutput */
531#define QICTL_CWSO 0x4000 /* CtrlDataWordSwapOutput */
532#define QICTL_CDWSO 0x8000 /* CtrlDataDWordSwapOutput */
533#define QICTL_DMBS 0x010000
534#define QICTL_EPO 0x020000
535
536/* QI status bits */
537#define QISTA_PHRDERR 0x01 /* PreHeader Read Error */
538#define QISTA_CFRDERR 0x02 /* Compound Frame Read Error */
539#define QISTA_OFWRERR		0x04 /* Output Frame Write Error */
540#define QISTA_BPDERR 0x08 /* Buffer Pool Depleted */
541#define QISTA_BTSERR 0x10 /* Buffer Undersize */
542#define QISTA_CFWRERR 0x20 /* Compound Frame Write Err */
543#define QISTA_STOPD 0x80000000 /* QI Stopped (see QICTL) */
544
545/* deco_sg_table - DECO view of scatter/gather table */
546struct deco_sg_table {
547 u64 addr; /* Segment Address */
548 u32 elen; /* E, F bits + 30-bit length */
549 u32 bpid_offset; /* Buffer Pool ID + 16-bit length */
550};
551
552/*
553 * caam_deco - descriptor controller - CHA cluster block
554 *
555 * Only accessible when direct DECO access is turned on
556 * (done in DECORR, via the MID programmed in DECOxMID)
557 *
558 * 5 typical, base + 0x8000/9000/a000/b000
559 * Padded out to 0x1000 long
560 */
561struct caam_deco {
562 u32 rsvd1;
563 u32 cls1_mode; /* CxC1MR - Class 1 Mode */
564 u32 rsvd2;
565 u32 cls1_keysize; /* CxC1KSR - Class 1 Key Size */
566 u32 cls1_datasize_hi; /* CxC1DSR - Class 1 Data Size */
567 u32 cls1_datasize_lo;
568 u32 rsvd3;
569 u32 cls1_icvsize; /* CxC1ICVSR - Class 1 ICV size */
570 u32 rsvd4[5];
571 u32 cha_ctrl; /* CCTLR - CHA control */
572 u32 rsvd5;
573	u32 irq_ctrl;	/* CxCIRQ - CCB interrupt done/error/clear */
574 u32 rsvd6;
575 u32 clr_written; /* CxCWR - Clear-Written */
576 u32 ccb_status_hi; /* CxCSTA - CCB Status/Error */
577 u32 ccb_status_lo;
578 u32 rsvd7[3];
579 u32 aad_size; /* CxAADSZR - Current AAD Size */
580 u32 rsvd8;
581 u32 cls1_iv_size; /* CxC1IVSZR - Current Class 1 IV Size */
582 u32 rsvd9[7];
583 u32 pkha_a_size; /* PKASZRx - Size of PKHA A */
584 u32 rsvd10;
585 u32 pkha_b_size; /* PKBSZRx - Size of PKHA B */
586 u32 rsvd11;
587 u32 pkha_n_size; /* PKNSZRx - Size of PKHA N */
588 u32 rsvd12;
589 u32 pkha_e_size; /* PKESZRx - Size of PKHA E */
590 u32 rsvd13[24];
591 u32 cls1_ctx[16]; /* CxC1CTXR - Class 1 Context @100 */
592 u32 rsvd14[48];
593 u32 cls1_key[8]; /* CxC1KEYR - Class 1 Key @200 */
594 u32 rsvd15[121];
595 u32 cls2_mode; /* CxC2MR - Class 2 Mode */
596 u32 rsvd16;
597 u32 cls2_keysize; /* CxX2KSR - Class 2 Key Size */
598 u32 cls2_datasize_hi; /* CxC2DSR - Class 2 Data Size */
599 u32 cls2_datasize_lo;
600 u32 rsvd17;
601 u32 cls2_icvsize; /* CxC2ICVSZR - Class 2 ICV Size */
602 u32 rsvd18[56];
603 u32 cls2_ctx[18]; /* CxC2CTXR - Class 2 Context @500 */
604 u32 rsvd19[46];
605 u32 cls2_key[32]; /* CxC2KEYR - Class2 Key @600 */
606 u32 rsvd20[84];
607 u32 inp_infofifo_hi; /* CxIFIFO - Input Info FIFO @7d0 */
608 u32 inp_infofifo_lo;
609 u32 rsvd21[2];
610 u64 inp_datafifo; /* CxDFIFO - Input Data FIFO */
611 u32 rsvd22[2];
612 u64 out_datafifo; /* CxOFIFO - Output Data FIFO */
613 u32 rsvd23[2];
614 u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
615 u32 jr_ctl_lo;
616 u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
617 u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
618 u32 op_status_lo;
619 u32 rsvd24[2];
620 u32 liodn; /* DxLSR - DECO LIODN Status - non-seq */
621 u32 td_liodn; /* DxLSR - DECO LIODN Status - trustdesc */
622 u32 rsvd26[6];
623 u64 math[4]; /* DxMTH - Math register */
624 u32 rsvd27[8];
625 struct deco_sg_table gthr_tbl[4]; /* DxGTR - Gather Tables */
626 u32 rsvd28[16];
627 struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */
628 u32 rsvd29[48];
629 u32 descbuf[64]; /* DxDESB - Descriptor buffer */
630 u32 rsvd30[320];
631};
632
633/*
634 * Current top-level view of memory map is:
635 *
636 * 0x0000 - 0x0fff - CAAM Top-Level Control
637 * 0x1000 - 0x1fff - Job Ring 0
638 * 0x2000 - 0x2fff - Job Ring 1
639 * 0x3000 - 0x3fff - Job Ring 2
640 * 0x4000 - 0x4fff - Job Ring 3
641 * 0x5000 - 0x5fff - (unused)
642 * 0x6000 - 0x6fff - Assurance Controller
643 * 0x7000 - 0x7fff - Queue Interface
644 * 0x8000 - 0x8fff - DECO-CCB 0
645 * 0x9000 - 0x9fff - DECO-CCB 1
646 * 0xa000 - 0xafff - DECO-CCB 2
647 * 0xb000 - 0xbfff - DECO-CCB 3
648 * 0xc000 - 0xcfff - DECO-CCB 4
649 *
650 * caam_full describes the full register view of CAAM if useful,
651 * although many configurations may choose to implement parts of
652 * the register map separately, in differing privilege regions
653 */
654struct caam_full {
655 struct caam_ctrl __iomem ctrl;
656 struct caam_job_ring jr[4];
657 u64 rsvd[512];
658 struct caam_assurance assure;
659 struct caam_queue_if qi;
660 struct caam_deco *deco;
661};
662
663#endif /* REGS_H */
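
The perfmon counters are the main consumers of the atomic-read latching
described at the top of this header: a 64-bit counter can be read
coherently even from a 32-bit core because the hardware latches the
second half until it is fetched. A sketch (pm is a hypothetical pointer
to a mapped caam_perfmon block):

	/* Sketch: coherent 64-bit counter read on any word size */
	static inline u64 caam_read_dequeued(struct caam_perfmon __iomem *pm)
	{
		return rd_reg64(&pm->req_dequeued);
	}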
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index c99305afa58a..3cf303ee3fe3 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -133,7 +133,6 @@ struct mv_req_hash_ctx {
133 int extra_bytes; /* unprocessed bytes in buffer */ 133 int extra_bytes; /* unprocessed bytes in buffer */
134 enum hash_op op; 134 enum hash_op op;
135 int count_add; 135 int count_add;
136 struct scatterlist dummysg;
137}; 136};
138 137
139static void compute_aes_dec_key(struct mv_ctx *ctx) 138static void compute_aes_dec_key(struct mv_ctx *ctx)
@@ -187,9 +186,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
187{ 186{
188 int ret; 187 int ret;
189 void *sbuf; 188 void *sbuf;
190 int copied = 0; 189 int copy_len;
191 190
192 while (1) { 191 while (len) {
193 if (!p->sg_src_left) { 192 if (!p->sg_src_left) {
194 ret = sg_miter_next(&p->src_sg_it); 193 ret = sg_miter_next(&p->src_sg_it);
195 BUG_ON(!ret); 194 BUG_ON(!ret);
@@ -199,19 +198,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
199 198
200 sbuf = p->src_sg_it.addr + p->src_start; 199 sbuf = p->src_sg_it.addr + p->src_start;
201 200
202 if (p->sg_src_left <= len - copied) { 201 copy_len = min(p->sg_src_left, len);
203 memcpy(dbuf + copied, sbuf, p->sg_src_left); 202 memcpy(dbuf, sbuf, copy_len);
204 copied += p->sg_src_left; 203
205 p->sg_src_left = 0; 204 p->src_start += copy_len;
206 if (copied >= len) 205 p->sg_src_left -= copy_len;
207 break; 206
208 } else { 207 len -= copy_len;
209 int copy_len = len - copied; 208 dbuf += copy_len;
210 memcpy(dbuf + copied, sbuf, copy_len);
211 p->src_start += copy_len;
212 p->sg_src_left -= copy_len;
213 break;
214 }
215 } 209 }
216} 210}
217 211
@@ -275,7 +269,6 @@ static void mv_process_current_q(int first_block)
275 memcpy(cpg->sram + SRAM_CONFIG, &op, 269 memcpy(cpg->sram + SRAM_CONFIG, &op,
276 sizeof(struct sec_accel_config)); 270 sizeof(struct sec_accel_config));
277 271
278 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
279 /* GO */ 272 /* GO */
280 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); 273 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
281 274
@@ -302,6 +295,7 @@ static void mv_crypto_algo_completion(void)
302static void mv_process_hash_current(int first_block) 295static void mv_process_hash_current(int first_block)
303{ 296{
304 struct ahash_request *req = ahash_request_cast(cpg->cur_req); 297 struct ahash_request *req = ahash_request_cast(cpg->cur_req);
298 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
305 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); 299 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
306 struct req_progress *p = &cpg->p; 300 struct req_progress *p = &cpg->p;
307 struct sec_accel_config op = { 0 }; 301 struct sec_accel_config op = { 0 };
@@ -314,6 +308,8 @@ static void mv_process_hash_current(int first_block)
314 break; 308 break;
315 case COP_HMAC_SHA1: 309 case COP_HMAC_SHA1:
316 op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; 310 op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
311 memcpy(cpg->sram + SRAM_HMAC_IV_IN,
312 tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
317 break; 313 break;
318 } 314 }
319 315
@@ -345,11 +341,16 @@ static void mv_process_hash_current(int first_block)
345 op.config |= CFG_LAST_FRAG; 341 op.config |= CFG_LAST_FRAG;
346 else 342 else
347 op.config |= CFG_MID_FRAG; 343 op.config |= CFG_MID_FRAG;
344
345 writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
346 writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
347 writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
348 writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
349 writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
348 } 350 }
349 351
350 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); 352 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
351 353
352 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
353 /* GO */ 354 /* GO */
354 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); 355 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
355 356
@@ -409,12 +410,6 @@ static void mv_hash_algo_completion(void)
409 copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); 410 copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
410 sg_miter_stop(&cpg->p.src_sg_it); 411 sg_miter_stop(&cpg->p.src_sg_it);
411 412
412 ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
413 ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
414 ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
415 ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
416 ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
417
418 if (likely(ctx->last_chunk)) { 413 if (likely(ctx->last_chunk)) {
419 if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { 414 if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
420 memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, 415 memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
@@ -422,6 +417,12 @@ static void mv_hash_algo_completion(void)
422 (req))); 417 (req)));
423 } else 418 } else
424 mv_hash_final_fallback(req); 419 mv_hash_final_fallback(req);
420 } else {
421 ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
422 ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
423 ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
424 ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
425 ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
425 } 426 }
426} 427}
427 428
@@ -480,7 +481,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
480 int i = 0; 481 int i = 0;
481 size_t cur_len; 482 size_t cur_len;
482 483
483 while (1) { 484 while (sl) {
484 cur_len = sl[i].length; 485 cur_len = sl[i].length;
485 ++i; 486 ++i;
486 if (total_bytes > cur_len) 487 if (total_bytes > cur_len)
@@ -517,29 +518,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
517{ 518{
518 struct req_progress *p = &cpg->p; 519 struct req_progress *p = &cpg->p;
519 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); 520 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
520 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
521 int num_sgs, hw_bytes, old_extra_bytes, rc; 521 int num_sgs, hw_bytes, old_extra_bytes, rc;
522 cpg->cur_req = &req->base; 522 cpg->cur_req = &req->base;
523 memset(p, 0, sizeof(struct req_progress)); 523 memset(p, 0, sizeof(struct req_progress));
524 hw_bytes = req->nbytes + ctx->extra_bytes; 524 hw_bytes = req->nbytes + ctx->extra_bytes;
525 old_extra_bytes = ctx->extra_bytes; 525 old_extra_bytes = ctx->extra_bytes;
526 526
527 if (unlikely(ctx->extra_bytes)) {
528 memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
529 ctx->extra_bytes);
530 p->crypt_len = ctx->extra_bytes;
531 }
532
533 memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
534
535 if (unlikely(!ctx->first_hash)) {
536 writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
537 writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
538 writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
539 writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
540 writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
541 }
542
543 ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; 527 ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
544 if (ctx->extra_bytes != 0 528 if (ctx->extra_bytes != 0
545 && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) 529 && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
@@ -555,6 +539,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
555 p->complete = mv_hash_algo_completion; 539 p->complete = mv_hash_algo_completion;
556 p->process = mv_process_hash_current; 540 p->process = mv_process_hash_current;
557 541
542 if (unlikely(old_extra_bytes)) {
543 memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
544 old_extra_bytes);
545 p->crypt_len = old_extra_bytes;
546 }
547
558 mv_process_hash_current(1); 548 mv_process_hash_current(1);
559 } else { 549 } else {
560 copy_src_to_buf(p, ctx->buffer + old_extra_bytes, 550 copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
@@ -603,9 +593,7 @@ static int queue_manag(void *data)
603 if (async_req->tfm->__crt_alg->cra_type != 593 if (async_req->tfm->__crt_alg->cra_type !=
604 &crypto_ahash_type) { 594 &crypto_ahash_type) {
605 struct ablkcipher_request *req = 595 struct ablkcipher_request *req =
606 container_of(async_req, 596 ablkcipher_request_cast(async_req);
607 struct ablkcipher_request,
608 base);
609 mv_start_new_crypt_req(req); 597 mv_start_new_crypt_req(req);
610 } else { 598 } else {
611 struct ahash_request *req = 599 struct ahash_request *req =
@@ -722,19 +710,13 @@ static int mv_hash_update(struct ahash_request *req)
722static int mv_hash_final(struct ahash_request *req) 710static int mv_hash_final(struct ahash_request *req)
723{ 711{
724 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); 712 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
725 /* dummy buffer of 4 bytes */ 713
726 sg_init_one(&ctx->dummysg, ctx->buffer, 4);
727 /* I think I'm allowed to do that... */
728 ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
729 mv_update_hash_req_ctx(ctx, 1, 0); 714 mv_update_hash_req_ctx(ctx, 1, 0);
730 return mv_handle_req(&req->base); 715 return mv_handle_req(&req->base);
731} 716}
732 717
733static int mv_hash_finup(struct ahash_request *req) 718static int mv_hash_finup(struct ahash_request *req)
734{ 719{
735 if (!req->nbytes)
736 return mv_hash_final(req);
737
738 mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); 720 mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
739 return mv_handle_req(&req->base); 721 return mv_handle_req(&req->base);
740} 722}
@@ -1065,14 +1047,21 @@ static int mv_probe(struct platform_device *pdev)
1065 1047
1066 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); 1048 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
1067 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); 1049 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
1050 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
1068 1051
1069 ret = crypto_register_alg(&mv_aes_alg_ecb); 1052 ret = crypto_register_alg(&mv_aes_alg_ecb);
1070 if (ret) 1053 if (ret) {
1054 printk(KERN_WARNING MV_CESA
1055 "Could not register aes-ecb driver\n");
1071 goto err_irq; 1056 goto err_irq;
1057 }
1072 1058
1073 ret = crypto_register_alg(&mv_aes_alg_cbc); 1059 ret = crypto_register_alg(&mv_aes_alg_cbc);
1074 if (ret) 1060 if (ret) {
1061 printk(KERN_WARNING MV_CESA
1062 "Could not register aes-cbc driver\n");
1075 goto err_unreg_ecb; 1063 goto err_unreg_ecb;
1064 }
1076 1065
1077 ret = crypto_register_ahash(&mv_sha1_alg); 1066 ret = crypto_register_ahash(&mv_sha1_alg);
1078 if (ret == 0) 1067 if (ret == 0)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 465cde3e4f60..ba8f1ea84c5e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -78,7 +78,6 @@
78#define FLAGS_SHA1 0x0010 78#define FLAGS_SHA1 0x0010
79#define FLAGS_DMA_ACTIVE 0x0020 79#define FLAGS_DMA_ACTIVE 0x0020
80#define FLAGS_OUTPUT_READY 0x0040 80#define FLAGS_OUTPUT_READY 0x0040
81#define FLAGS_CLEAN 0x0080
82#define FLAGS_INIT 0x0100 81#define FLAGS_INIT 0x0100
83#define FLAGS_CPU 0x0200 82#define FLAGS_CPU 0x0200
84#define FLAGS_HMAC 0x0400 83#define FLAGS_HMAC 0x0400
@@ -511,26 +510,6 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
511 return 0; 510 return 0;
512} 511}
513 512
514static void omap_sham_cleanup(struct ahash_request *req)
515{
516 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
517 struct omap_sham_dev *dd = ctx->dd;
518 unsigned long flags;
519
520 spin_lock_irqsave(&dd->lock, flags);
521 if (ctx->flags & FLAGS_CLEAN) {
522 spin_unlock_irqrestore(&dd->lock, flags);
523 return;
524 }
525 ctx->flags |= FLAGS_CLEAN;
526 spin_unlock_irqrestore(&dd->lock, flags);
527
528 if (ctx->digcnt)
529 omap_sham_copy_ready_hash(req);
530
531 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
532}
533
534static int omap_sham_init(struct ahash_request *req) 513static int omap_sham_init(struct ahash_request *req)
535{ 514{
536 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 515 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -618,9 +597,8 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
618 return err; 597 return err;
619} 598}
620 599
621static int omap_sham_finish_req_hmac(struct ahash_request *req) 600static int omap_sham_finish_hmac(struct ahash_request *req)
622{ 601{
623 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
624 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 602 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
625 struct omap_sham_hmac_ctx *bctx = tctx->base; 603 struct omap_sham_hmac_ctx *bctx = tctx->base;
626 int bs = crypto_shash_blocksize(bctx->shash); 604 int bs = crypto_shash_blocksize(bctx->shash);
@@ -635,7 +613,24 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
635 613
636 return crypto_shash_init(&desc.shash) ?: 614 return crypto_shash_init(&desc.shash) ?:
637 crypto_shash_update(&desc.shash, bctx->opad, bs) ?: 615 crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
638 crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest); 616 crypto_shash_finup(&desc.shash, req->result, ds, req->result);
617}
618
619static int omap_sham_finish(struct ahash_request *req)
620{
621 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
622 struct omap_sham_dev *dd = ctx->dd;
623 int err = 0;
624
625 if (ctx->digcnt) {
626 omap_sham_copy_ready_hash(req);
627 if (ctx->flags & FLAGS_HMAC)
628 err = omap_sham_finish_hmac(req);
629 }
630
631 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
632
633 return err;
639} 634}
640 635
641static void omap_sham_finish_req(struct ahash_request *req, int err) 636static void omap_sham_finish_req(struct ahash_request *req, int err)
@@ -645,15 +640,12 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
645 640
646 if (!err) { 641 if (!err) {
647 omap_sham_copy_hash(ctx->dd->req, 1); 642 omap_sham_copy_hash(ctx->dd->req, 1);
648 if (ctx->flags & FLAGS_HMAC) 643 if (ctx->flags & FLAGS_FINAL)
649 err = omap_sham_finish_req_hmac(req); 644 err = omap_sham_finish(req);
650 } else { 645 } else {
651 ctx->flags |= FLAGS_ERROR; 646 ctx->flags |= FLAGS_ERROR;
652 } 647 }
653 648
654 if ((ctx->flags & FLAGS_FINAL) || err)
655 omap_sham_cleanup(req);
656
657 clk_disable(dd->iclk); 649 clk_disable(dd->iclk);
658 dd->flags &= ~FLAGS_BUSY; 650 dd->flags &= ~FLAGS_BUSY;
659 651
@@ -809,22 +801,21 @@ static int omap_sham_final_shash(struct ahash_request *req)
809static int omap_sham_final(struct ahash_request *req) 801static int omap_sham_final(struct ahash_request *req)
810{ 802{
811 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 803 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
812 int err = 0;
813 804
814 ctx->flags |= FLAGS_FINUP; 805 ctx->flags |= FLAGS_FINUP;
815 806
816 if (!(ctx->flags & FLAGS_ERROR)) { 807 if (ctx->flags & FLAGS_ERROR)
817 /* OMAP HW accel works only with buffers >= 9 */ 808 return 0; /* an incomplete hash is not needed */
818 /* HMAC is always >= 9 because of ipad */
819 if ((ctx->digcnt + ctx->bufcnt) < 9)
820 err = omap_sham_final_shash(req);
821 else if (ctx->bufcnt)
822 return omap_sham_enqueue(req, OP_FINAL);
823 }
824 809
825 omap_sham_cleanup(req); 810 /* OMAP HW accel works only with buffers >= 9 */
811 /* HMAC is always >= 9 because ipad == block size */
812 if ((ctx->digcnt + ctx->bufcnt) < 9)
813 return omap_sham_final_shash(req);
814 else if (ctx->bufcnt)
815 return omap_sham_enqueue(req, OP_FINAL);
826 816
827 return err; 817 /* copy ready hash (+ finalize hmac) */
818 return omap_sham_finish(req);
828} 819}
829 820
830static int omap_sham_finup(struct ahash_request *req) 821static int omap_sham_finup(struct ahash_request *req)
@@ -835,7 +826,7 @@ static int omap_sham_finup(struct ahash_request *req)
835 ctx->flags |= FLAGS_FINUP; 826 ctx->flags |= FLAGS_FINUP;
836 827
837 err1 = omap_sham_update(req); 828 err1 = omap_sham_update(req);
838 if (err1 == -EINPROGRESS) 829 if (err1 == -EINPROGRESS || err1 == -EBUSY)
839 return err1; 830 return err1;
840 /* 831 /*
841 * final() has to be always called to cleanup resources 832 * final() has to be always called to cleanup resources
@@ -890,8 +881,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
890 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); 881 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
891 const char *alg_name = crypto_tfm_alg_name(tfm); 882 const char *alg_name = crypto_tfm_alg_name(tfm);
892 883
893 pr_info("enter\n");
894
895 /* Allocate a fallback and abort if it failed. */ 884 /* Allocate a fallback and abort if it failed. */
896 tctx->fallback = crypto_alloc_shash(alg_name, 0, 885 tctx->fallback = crypto_alloc_shash(alg_name, 0,
897 CRYPTO_ALG_NEED_FALLBACK); 886 CRYPTO_ALG_NEED_FALLBACK);
@@ -1297,7 +1286,8 @@ static int __init omap_sham_mod_init(void)
1297 pr_info("loading %s driver\n", "omap-sham"); 1286 pr_info("loading %s driver\n", "omap-sham");
1298 1287
1299 if (!cpu_class_is_omap2() || 1288 if (!cpu_class_is_omap2() ||
1300 omap_type() != OMAP2_DEVICE_TYPE_SEC) { 1289 (omap_type() != OMAP2_DEVICE_TYPE_SEC &&
1290 omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
1301 pr_err("Unsupported cpu\n"); 1291 pr_err("Unsupported cpu\n");
1302 return -ENODEV; 1292 return -ENODEV;
1303 } 1293 }
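
omap_sham_finish_hmac() above chains crypto_shash_init/update/finup with GCC's `a ?: b` extension, which yields `a` when it is non-zero, so the first failing step short-circuits the chain. A standalone illustration of the idiom (stub steps, nothing OMAP-specific):

/* Error chaining with the GNU a ?: b extension: evaluates to a when a is
 * non-zero, otherwise to b -- so the first failing step wins. */
#include <stdio.h>

static int step_init(void)   { return 0; }
static int step_update(void) { return 0; }
static int step_finup(void)  { return -5; /* pretend the last step fails */ }

int main(void)
{
	int err = step_init() ?: step_update() ?: step_finup();

	printf("err = %d\n", err);  /* prints err = -5 */
	return 0;
}
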
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index adf075b6b9a8..06bdb4b2c6a6 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -288,9 +288,250 @@ static struct shash_alg sha256_alg = {
288 } 288 }
289}; 289};
290 290
291/* Add two shash_alg instances for the hardware-implemented *
292* multi-part hash supported by the VIA Nano processor. */
293static int padlock_sha1_init_nano(struct shash_desc *desc)
294{
295 struct sha1_state *sctx = shash_desc_ctx(desc);
296
297 *sctx = (struct sha1_state){
298 .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
299 };
300
301 return 0;
302}
303
304static int padlock_sha1_update_nano(struct shash_desc *desc,
305 const u8 *data, unsigned int len)
306{
307 struct sha1_state *sctx = shash_desc_ctx(desc);
308 unsigned int partial, done;
309 const u8 *src;
310	/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
311 u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
312 ((aligned(STACK_ALIGN)));
313 u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
314 int ts_state;
315
316 partial = sctx->count & 0x3f;
317 sctx->count += len;
318 done = 0;
319 src = data;
320 memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
321
322 if ((partial + len) >= SHA1_BLOCK_SIZE) {
323
324		/* Merge the buffered partial block with new data into one full block */
325 if (partial) {
326 done = -partial;
327 memcpy(sctx->buffer + partial, data,
328 done + SHA1_BLOCK_SIZE);
329 src = sctx->buffer;
330 ts_state = irq_ts_save();
331 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
332 : "+S"(src), "+D"(dst) \
333 : "a"((long)-1), "c"((unsigned long)1));
334 irq_ts_restore(ts_state);
335 done += SHA1_BLOCK_SIZE;
336 src = data + done;
337 }
338
339		/* Process the remaining whole blocks of the input data */
340 if (len - done >= SHA1_BLOCK_SIZE) {
341 ts_state = irq_ts_save();
342 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
343 : "+S"(src), "+D"(dst)
344 : "a"((long)-1),
345 "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
346 irq_ts_restore(ts_state);
347 done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
348 src = data + done;
349 }
350 partial = 0;
351 }
352 memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
353 memcpy(sctx->buffer + partial, src, len - done);
354
355 return 0;
356}
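
padlock_sha1_update_nano() above carries a partial block between calls and hands the XSHA instruction only whole 64-byte blocks. A standalone sketch of that buffering discipline (a toy process_blocks() stands in for the PadLock instruction, and the negative-`done` trick from the original is replaced by plain arithmetic):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BLK 64

struct st { uint64_t count; uint8_t buf[BLK]; unsigned long blocks; };

static void process_blocks(struct st *s, const uint8_t *src, size_t n)
{
	s->blocks += n;  /* a real engine would compress n blocks here */
}

static void update(struct st *s, const uint8_t *data, size_t len)
{
	unsigned int partial = s->count & (BLK - 1);
	size_t done = 0;

	s->count += len;
	if (partial + len >= BLK) {
		if (partial) {          /* top up the carried partial block */
			done = BLK - partial;
			memcpy(s->buf + partial, data, done);
			process_blocks(s, s->buf, 1);
		}
		process_blocks(s, data + done, (len - done) / BLK);
		done += ((len - done) / BLK) * BLK;
		partial = 0;
	}
	memcpy(s->buf + partial, data + done, len - done); /* carry remainder */
}

int main(void)
{
	struct st s = {0};
	uint8_t msg[200] = {0};

	update(&s, msg, 10);
	update(&s, msg, 190);   /* 200 bytes total -> 3 whole blocks */
	printf("blocks=%lu carried=%llu\n", s.blocks,
	       (unsigned long long)(s.count % BLK)); /* blocks=3 carried=8 */
	return 0;
}
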
357
358static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
359{
360 struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
361 unsigned int partial, padlen;
362 __be64 bits;
363 static const u8 padding[64] = { 0x80, };
364
365 bits = cpu_to_be64(state->count << 3);
366
367 /* Pad out to 56 mod 64 */
368 partial = state->count & 0x3f;
369 padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
370 padlock_sha1_update_nano(desc, padding, padlen);
371
372 /* Append length field bytes */
373 padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));
374
375 /* Swap to output */
376 padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
377
378 return 0;
379}
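
The *_final_nano() routines finish with standard Merkle-Damgard padding: a 0x80 byte, zeros up to 56 mod 64, then the 64-bit big-endian bit count. A quick standalone check of the padlen arithmetic used above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bytes of padding needed so that (count + padlen) % 64 == 56. */
static unsigned int md_padlen(uint64_t count)
{
	unsigned int partial = count & 0x3f;

	return (partial < 56) ? (56 - partial) : ((64 + 56) - partial);
}

int main(void)
{
	for (uint64_t c = 0; c < 256; c++)
		assert((c + md_padlen(c)) % 64 == 56);
	printf("padlen(0)=%u padlen(55)=%u padlen(56)=%u padlen(63)=%u\n",
	       md_padlen(0), md_padlen(55), md_padlen(56), md_padlen(63));
	return 0;  /* prints 56 1 64 57 */
}
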
380
381static int padlock_sha256_init_nano(struct shash_desc *desc)
382{
383 struct sha256_state *sctx = shash_desc_ctx(desc);
384
385 *sctx = (struct sha256_state){
386 .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
387 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
388 };
389
390 return 0;
391}
392
393static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
394 unsigned int len)
395{
396 struct sha256_state *sctx = shash_desc_ctx(desc);
397 unsigned int partial, done;
398 const u8 *src;
399	/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
400 u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
401 ((aligned(STACK_ALIGN)));
402 u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
403 int ts_state;
404
405 partial = sctx->count & 0x3f;
406 sctx->count += len;
407 done = 0;
408 src = data;
409 memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
410
411 if ((partial + len) >= SHA256_BLOCK_SIZE) {
412
413		/* Merge the buffered partial block with new data into one full block */
414 if (partial) {
415 done = -partial;
416 memcpy(sctx->buf + partial, data,
417 done + SHA256_BLOCK_SIZE);
418 src = sctx->buf;
419 ts_state = irq_ts_save();
420 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
421 : "+S"(src), "+D"(dst)
422 : "a"((long)-1), "c"((unsigned long)1));
423 irq_ts_restore(ts_state);
424 done += SHA256_BLOCK_SIZE;
425 src = data + done;
426 }
427
428		/* Process the remaining whole blocks of the input data */
429 if (len - done >= SHA256_BLOCK_SIZE) {
430 ts_state = irq_ts_save();
431 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
432 : "+S"(src), "+D"(dst)
433 : "a"((long)-1),
434 "c"((unsigned long)((len - done) / 64)));
435 irq_ts_restore(ts_state);
436 done += ((len - done) - (len - done) % 64);
437 src = data + done;
438 }
439 partial = 0;
440 }
441 memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
442 memcpy(sctx->buf + partial, src, len - done);
443
444 return 0;
445}
446
447static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
448{
449 struct sha256_state *state =
450 (struct sha256_state *)shash_desc_ctx(desc);
451 unsigned int partial, padlen;
452 __be64 bits;
453 static const u8 padding[64] = { 0x80, };
454
455 bits = cpu_to_be64(state->count << 3);
456
457 /* Pad out to 56 mod 64 */
458 partial = state->count & 0x3f;
459 padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
460 padlock_sha256_update_nano(desc, padding, padlen);
461
462 /* Append length field bytes */
463 padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));
464
465 /* Swap to output */
466 padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
467
468 return 0;
469}
470
471static int padlock_sha_export_nano(struct shash_desc *desc,
472 void *out)
473{
474 int statesize = crypto_shash_statesize(desc->tfm);
475 void *sctx = shash_desc_ctx(desc);
476
477 memcpy(out, sctx, statesize);
478 return 0;
479}
480
481static int padlock_sha_import_nano(struct shash_desc *desc,
482 const void *in)
483{
484 int statesize = crypto_shash_statesize(desc->tfm);
485 void *sctx = shash_desc_ctx(desc);
486
487 memcpy(sctx, in, statesize);
488 return 0;
489}
490
491static struct shash_alg sha1_alg_nano = {
492 .digestsize = SHA1_DIGEST_SIZE,
493 .init = padlock_sha1_init_nano,
494 .update = padlock_sha1_update_nano,
495 .final = padlock_sha1_final_nano,
496 .export = padlock_sha_export_nano,
497 .import = padlock_sha_import_nano,
498 .descsize = sizeof(struct sha1_state),
499 .statesize = sizeof(struct sha1_state),
500 .base = {
501 .cra_name = "sha1",
502 .cra_driver_name = "sha1-padlock-nano",
503 .cra_priority = PADLOCK_CRA_PRIORITY,
504 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
505 .cra_blocksize = SHA1_BLOCK_SIZE,
506 .cra_module = THIS_MODULE,
507 }
508};
509
510static struct shash_alg sha256_alg_nano = {
511 .digestsize = SHA256_DIGEST_SIZE,
512 .init = padlock_sha256_init_nano,
513 .update = padlock_sha256_update_nano,
514 .final = padlock_sha256_final_nano,
515 .export = padlock_sha_export_nano,
516 .import = padlock_sha_import_nano,
517 .descsize = sizeof(struct sha256_state),
518 .statesize = sizeof(struct sha256_state),
519 .base = {
520 .cra_name = "sha256",
521 .cra_driver_name = "sha256-padlock-nano",
522 .cra_priority = PADLOCK_CRA_PRIORITY,
523 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
524 .cra_blocksize = SHA256_BLOCK_SIZE,
525 .cra_module = THIS_MODULE,
526 }
527};
528
291static int __init padlock_init(void) 529static int __init padlock_init(void)
292{ 530{
293 int rc = -ENODEV; 531 int rc = -ENODEV;
532 struct cpuinfo_x86 *c = &cpu_data(0);
533 struct shash_alg *sha1;
534 struct shash_alg *sha256;
294 535
295 if (!cpu_has_phe) { 536 if (!cpu_has_phe) {
296 printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n"); 537 printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
@@ -302,11 +543,21 @@ static int __init padlock_init(void)
302 return -ENODEV; 543 return -ENODEV;
303 } 544 }
304 545
305 rc = crypto_register_shash(&sha1_alg); 546 /* Register the newly added algorithm module if on *
547 * VIA Nano processor, or else just do as before */
548 if (c->x86_model < 0x0f) {
549 sha1 = &sha1_alg;
550 sha256 = &sha256_alg;
551 } else {
552 sha1 = &sha1_alg_nano;
553 sha256 = &sha256_alg_nano;
554 }
555
556 rc = crypto_register_shash(sha1);
306 if (rc) 557 if (rc)
307 goto out; 558 goto out;
308 559
309 rc = crypto_register_shash(&sha256_alg); 560 rc = crypto_register_shash(sha256);
310 if (rc) 561 if (rc)
311 goto out_unreg1; 562 goto out_unreg1;
312 563
@@ -315,7 +566,8 @@ static int __init padlock_init(void)
315 return 0; 566 return 0;
316 567
317out_unreg1: 568out_unreg1:
318 crypto_unregister_shash(&sha1_alg); 569 crypto_unregister_shash(sha1);
570
319out: 571out:
320 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); 572 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
321 return rc; 573 return rc;
@@ -323,8 +575,15 @@ out:
323 575
324static void __exit padlock_fini(void) 576static void __exit padlock_fini(void)
325{ 577{
326 crypto_unregister_shash(&sha1_alg); 578 struct cpuinfo_x86 *c = &cpu_data(0);
327 crypto_unregister_shash(&sha256_alg); 579
580 if (c->x86_model >= 0x0f) {
581 crypto_unregister_shash(&sha1_alg_nano);
582 crypto_unregister_shash(&sha256_alg_nano);
583 } else {
584 crypto_unregister_shash(&sha1_alg);
585 crypto_unregister_shash(&sha256_alg);
586 }
328} 587}
329 588
330module_init(padlock_init); 589module_init(padlock_init);
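
padlock_init()/padlock_fini() now key the algorithm choice off cpu_data(0).x86_model. A minimal sketch of that dispatch (types and driver names trimmed for illustration; not the kernel structures):

#include <stdio.h>

struct shash_alg { const char *driver_name; };

static struct shash_alg sha1_alg      = { "sha1-padlock" };
static struct shash_alg sha1_alg_nano = { "sha1-padlock-nano" };

/* Models below 0x0f get the classic single-shot algorithms; Nano and
 * later get the multi-part variants added above. */
static struct shash_alg *pick_sha1(unsigned int x86_model)
{
	return x86_model < 0x0f ? &sha1_alg : &sha1_alg_nano;
}

int main(void)
{
	printf("model 0x0a -> %s\n", pick_sha1(0x0a)->driver_name);
	printf("model 0x0f -> %s\n", pick_sha1(0x0f)->driver_name);
	return 0;
}
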
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index b092d0a65837..230b5b8cda1f 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -176,6 +176,8 @@ struct spacc_aead_ctx {
176 u8 salt[AES_BLOCK_SIZE]; 176 u8 salt[AES_BLOCK_SIZE];
177}; 177};
178 178
179static int spacc_ablk_submit(struct spacc_req *req);
180
179static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg) 181static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
180{ 182{
181 return alg ? container_of(alg, struct spacc_alg, alg) : NULL; 183 return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
@@ -666,6 +668,24 @@ static int spacc_aead_submit(struct spacc_req *req)
666 return -EINPROGRESS; 668 return -EINPROGRESS;
667} 669}
668 670
671static int spacc_req_submit(struct spacc_req *req);
672
673static void spacc_push(struct spacc_engine *engine)
674{
675 struct spacc_req *req;
676
677 while (!list_empty(&engine->pending) &&
678 engine->in_flight + 1 <= engine->fifo_sz) {
679
680 ++engine->in_flight;
681 req = list_first_entry(&engine->pending, struct spacc_req,
682 list);
683 list_move_tail(&req->list, &engine->in_progress);
684
685 req->result = spacc_req_submit(req);
686 }
687}
688
669/* 689/*
670 * Setup an AEAD request for processing. This will configure the engine, load 690 * Setup an AEAD request for processing. This will configure the engine, load
671 * the context and then start the packet processing. 691 * the context and then start the packet processing.
@@ -698,7 +718,8 @@ static int spacc_aead_setup(struct aead_request *req, u8 *giv,
698 718
699 err = -EINPROGRESS; 719 err = -EINPROGRESS;
700 spin_lock_irqsave(&engine->hw_lock, flags); 720 spin_lock_irqsave(&engine->hw_lock, flags);
701 if (unlikely(spacc_fifo_cmd_full(engine))) { 721 if (unlikely(spacc_fifo_cmd_full(engine)) ||
722 engine->in_flight + 1 > engine->fifo_sz) {
702 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 723 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
703 err = -EBUSY; 724 err = -EBUSY;
704 spin_unlock_irqrestore(&engine->hw_lock, flags); 725 spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -706,9 +727,8 @@ static int spacc_aead_setup(struct aead_request *req, u8 *giv,
706 } 727 }
707 list_add_tail(&dev_req->list, &engine->pending); 728 list_add_tail(&dev_req->list, &engine->pending);
708 } else { 729 } else {
709 ++engine->in_flight; 730 list_add_tail(&dev_req->list, &engine->pending);
710 list_add_tail(&dev_req->list, &engine->in_progress); 731 spacc_push(engine);
711 spacc_aead_submit(dev_req);
712 } 732 }
713 spin_unlock_irqrestore(&engine->hw_lock, flags); 733 spin_unlock_irqrestore(&engine->hw_lock, flags);
714 734
@@ -1041,7 +1061,8 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
1041 * we either stick it on the end of a pending list if we can backlog, 1061 * we either stick it on the end of a pending list if we can backlog,
1042 * or bailout with an error if not. 1062 * or bailout with an error if not.
1043 */ 1063 */
1044 if (unlikely(spacc_fifo_cmd_full(engine))) { 1064 if (unlikely(spacc_fifo_cmd_full(engine)) ||
1065 engine->in_flight + 1 > engine->fifo_sz) {
1045 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 1066 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1046 err = -EBUSY; 1067 err = -EBUSY;
1047 spin_unlock_irqrestore(&engine->hw_lock, flags); 1068 spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -1049,9 +1070,8 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
1049 } 1070 }
1050 list_add_tail(&dev_req->list, &engine->pending); 1071 list_add_tail(&dev_req->list, &engine->pending);
1051 } else { 1072 } else {
1052 ++engine->in_flight; 1073 list_add_tail(&dev_req->list, &engine->pending);
1053 list_add_tail(&dev_req->list, &engine->in_progress); 1074 spacc_push(engine);
1054 spacc_ablk_submit(dev_req);
1055 } 1075 }
1056 spin_unlock_irqrestore(&engine->hw_lock, flags); 1076 spin_unlock_irqrestore(&engine->hw_lock, flags);
1057 1077
@@ -1139,6 +1159,7 @@ static void spacc_process_done(struct spacc_engine *engine)
1139 req = list_first_entry(&engine->in_progress, struct spacc_req, 1159 req = list_first_entry(&engine->in_progress, struct spacc_req,
1140 list); 1160 list);
1141 list_move_tail(&req->list, &engine->completed); 1161 list_move_tail(&req->list, &engine->completed);
1162 --engine->in_flight;
1142 1163
1143 /* POP the status register. */ 1164 /* POP the status register. */
1144 writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET); 1165 writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
@@ -1208,36 +1229,21 @@ static void spacc_spacc_complete(unsigned long data)
1208 struct spacc_engine *engine = (struct spacc_engine *)data; 1229 struct spacc_engine *engine = (struct spacc_engine *)data;
1209 struct spacc_req *req, *tmp; 1230 struct spacc_req *req, *tmp;
1210 unsigned long flags; 1231 unsigned long flags;
1211 int num_removed = 0;
1212 LIST_HEAD(completed); 1232 LIST_HEAD(completed);
1213 1233
1214 spin_lock_irqsave(&engine->hw_lock, flags); 1234 spin_lock_irqsave(&engine->hw_lock, flags);
1235
1215 list_splice_init(&engine->completed, &completed); 1236 list_splice_init(&engine->completed, &completed);
1237 spacc_push(engine);
1238 if (engine->in_flight)
1239 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
1240
1216 spin_unlock_irqrestore(&engine->hw_lock, flags); 1241 spin_unlock_irqrestore(&engine->hw_lock, flags);
1217 1242
1218 list_for_each_entry_safe(req, tmp, &completed, list) { 1243 list_for_each_entry_safe(req, tmp, &completed, list) {
1219 ++num_removed;
1220 req->complete(req); 1244 req->complete(req);
1245 list_del(&req->list);
1221 } 1246 }
1222
1223 /* Try and fill the engine back up again. */
1224 spin_lock_irqsave(&engine->hw_lock, flags);
1225
1226 engine->in_flight -= num_removed;
1227
1228 list_for_each_entry_safe(req, tmp, &engine->pending, list) {
1229 if (spacc_fifo_cmd_full(engine))
1230 break;
1231
1232 list_move_tail(&req->list, &engine->in_progress);
1233 ++engine->in_flight;
1234 req->result = spacc_req_submit(req);
1235 }
1236
1237 if (engine->in_flight)
1238 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
1239
1240 spin_unlock_irqrestore(&engine->hw_lock, flags);
1241} 1247}
1242 1248
1243#ifdef CONFIG_PM 1249#ifdef CONFIG_PM
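
The picoxcell change above funnels every request through the pending list and lets a single spacc_push() move work to the hardware while in_flight stays below fifo_sz; completion decrements in_flight and pushes again. A toy model of that discipline (plain counters instead of real lists; none of this is the picoxcell API):

#include <stdio.h>

#define FIFO_SZ 4

struct engine { int pending, in_flight, submitted; };

static void submit(struct engine *e) { e->submitted++; }

static void push(struct engine *e)
{
	/* drain pending into the hardware while there is FIFO room */
	while (e->pending && e->in_flight + 1 <= FIFO_SZ) {
		e->pending--;
		e->in_flight++;
		submit(e);
	}
}

static void enqueue(struct engine *e) { e->pending++; push(e); }

static void complete(struct engine *e)
{
	e->in_flight--;  /* spacc_process_done() does this in the driver */
	push(e);         /* refill from pending, as the tasklet does */
}

int main(void)
{
	struct engine e = {0};

	for (int i = 0; i < 6; i++)
		enqueue(&e);   /* only FIFO_SZ requests go in flight */
	printf("in_flight=%d pending=%d\n", e.in_flight, e.pending); /* 4 2 */
	complete(&e);
	printf("in_flight=%d pending=%d\n", e.in_flight, e.pending); /* 4 1 */
	return 0;
}
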
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
new file mode 100644
index 000000000000..8115417a1c93
--- /dev/null
+++ b/drivers/crypto/s5p-sss.c
@@ -0,0 +1,701 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for Samsung S5PV210 HW acceleration.
5 *
6 * Copyright (C) 2011 NetUP Inc. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 */
13
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/clk.h>
21#include <linux/platform_device.h>
22#include <linux/scatterlist.h>
23#include <linux/dma-mapping.h>
24#include <linux/io.h>
25#include <linux/crypto.h>
26#include <linux/interrupt.h>
27
28#include <crypto/algapi.h>
29#include <crypto/aes.h>
30#include <crypto/ctr.h>
31
32#include <plat/cpu.h>
33#include <plat/dma.h>
34
35#define _SBF(s, v) ((v) << (s))
36#define _BIT(b) _SBF(b, 1)
37
38/* Feed control registers */
39#define SSS_REG_FCINTSTAT 0x0000
40#define SSS_FCINTSTAT_BRDMAINT _BIT(3)
41#define SSS_FCINTSTAT_BTDMAINT _BIT(2)
42#define SSS_FCINTSTAT_HRDMAINT _BIT(1)
43#define SSS_FCINTSTAT_PKDMAINT _BIT(0)
44
45#define SSS_REG_FCINTENSET 0x0004
46#define SSS_FCINTENSET_BRDMAINTENSET _BIT(3)
47#define SSS_FCINTENSET_BTDMAINTENSET _BIT(2)
48#define SSS_FCINTENSET_HRDMAINTENSET _BIT(1)
49#define SSS_FCINTENSET_PKDMAINTENSET _BIT(0)
50
51#define SSS_REG_FCINTENCLR 0x0008
52#define SSS_FCINTENCLR_BRDMAINTENCLR _BIT(3)
53#define SSS_FCINTENCLR_BTDMAINTENCLR _BIT(2)
54#define SSS_FCINTENCLR_HRDMAINTENCLR _BIT(1)
55#define SSS_FCINTENCLR_PKDMAINTENCLR _BIT(0)
56
57#define SSS_REG_FCINTPEND 0x000C
58#define SSS_FCINTPEND_BRDMAINTP _BIT(3)
59#define SSS_FCINTPEND_BTDMAINTP _BIT(2)
60#define SSS_FCINTPEND_HRDMAINTP _BIT(1)
61#define SSS_FCINTPEND_PKDMAINTP _BIT(0)
62
63#define SSS_REG_FCFIFOSTAT 0x0010
64#define SSS_FCFIFOSTAT_BRFIFOFUL _BIT(7)
65#define SSS_FCFIFOSTAT_BRFIFOEMP _BIT(6)
66#define SSS_FCFIFOSTAT_BTFIFOFUL _BIT(5)
67#define SSS_FCFIFOSTAT_BTFIFOEMP _BIT(4)
68#define SSS_FCFIFOSTAT_HRFIFOFUL _BIT(3)
69#define SSS_FCFIFOSTAT_HRFIFOEMP _BIT(2)
70#define SSS_FCFIFOSTAT_PKFIFOFUL _BIT(1)
71#define SSS_FCFIFOSTAT_PKFIFOEMP _BIT(0)
72
73#define SSS_REG_FCFIFOCTRL 0x0014
74#define SSS_FCFIFOCTRL_DESSEL _BIT(2)
75#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
76#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
77#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
78
79#define SSS_REG_FCBRDMAS 0x0020
80#define SSS_REG_FCBRDMAL 0x0024
81#define SSS_REG_FCBRDMAC 0x0028
82#define SSS_FCBRDMAC_BYTESWAP _BIT(1)
83#define SSS_FCBRDMAC_FLUSH _BIT(0)
84
85#define SSS_REG_FCBTDMAS 0x0030
86#define SSS_REG_FCBTDMAL 0x0034
87#define SSS_REG_FCBTDMAC 0x0038
88#define SSS_FCBTDMAC_BYTESWAP _BIT(1)
89#define SSS_FCBTDMAC_FLUSH _BIT(0)
90
91#define SSS_REG_FCHRDMAS 0x0040
92#define SSS_REG_FCHRDMAL 0x0044
93#define SSS_REG_FCHRDMAC 0x0048
94#define SSS_FCHRDMAC_BYTESWAP _BIT(1)
95#define SSS_FCHRDMAC_FLUSH _BIT(0)
96
97#define SSS_REG_FCPKDMAS 0x0050
98#define SSS_REG_FCPKDMAL 0x0054
99#define SSS_REG_FCPKDMAC 0x0058
100#define SSS_FCPKDMAC_BYTESWAP _BIT(3)
101#define SSS_FCPKDMAC_DESCEND _BIT(2)
102#define SSS_FCPKDMAC_TRANSMIT _BIT(1)
103#define SSS_FCPKDMAC_FLUSH _BIT(0)
104
105#define SSS_REG_FCPKDMAO 0x005C
106
107/* AES registers */
108#define SSS_REG_AES_CONTROL 0x4000
109#define SSS_AES_BYTESWAP_DI _BIT(11)
110#define SSS_AES_BYTESWAP_DO _BIT(10)
111#define SSS_AES_BYTESWAP_IV _BIT(9)
112#define SSS_AES_BYTESWAP_CNT _BIT(8)
113#define SSS_AES_BYTESWAP_KEY _BIT(7)
114#define SSS_AES_KEY_CHANGE_MODE _BIT(6)
115#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
116#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
117#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
118#define SSS_AES_FIFO_MODE _BIT(3)
119#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
120#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
121#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
122#define SSS_AES_MODE_DECRYPT _BIT(0)
123
124#define SSS_REG_AES_STATUS 0x4004
125#define SSS_AES_BUSY _BIT(2)
126#define SSS_AES_INPUT_READY _BIT(1)
127#define SSS_AES_OUTPUT_READY _BIT(0)
128
129#define SSS_REG_AES_IN_DATA(s)	(0x4010 + ((s) << 2))
130#define SSS_REG_AES_OUT_DATA(s)	(0x4020 + ((s) << 2))
131#define SSS_REG_AES_IV_DATA(s)	(0x4030 + ((s) << 2))
132#define SSS_REG_AES_CNT_DATA(s)	(0x4040 + ((s) << 2))
133#define SSS_REG_AES_KEY_DATA(s)	(0x4080 + ((s) << 2))
134
135#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
136#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
137#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
138
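
The _SBF()/_BIT() helpers build shifted bitfields and SSS_REG() token-pastes register names into offsets. A standalone check of how they compose, with a few values copied from the defines above (SSS_REG_OFF() is a hypothetical stand-in for the pointer-based SSS_REG()):

#include <stdio.h>

#define _SBF(s, v)	((v) << (s))
#define _BIT(b)		_SBF(b, 1)

#define SSS_REG_AES_CONTROL	0x4000
#define SSS_AES_KEY_SIZE_256	_SBF(4, 0x02)
#define SSS_AES_MODE_DECRYPT	_BIT(0)

/* token pasting turns SSS_REG_OFF(AES_CONTROL) into 0x4000 */
#define SSS_REG_OFF(reg)	(SSS_REG_##reg)

int main(void)
{
	printf("AES_CONTROL offset: 0x%04x\n", SSS_REG_OFF(AES_CONTROL));
	printf("KEY_SIZE_256|DECRYPT: 0x%02x\n",
	       SSS_AES_KEY_SIZE_256 | SSS_AES_MODE_DECRYPT); /* 0x21 */
	return 0;
}
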
139/* HW engine modes */
140#define FLAGS_AES_DECRYPT _BIT(0)
141#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
142#define FLAGS_AES_CBC _SBF(1, 0x01)
143#define FLAGS_AES_CTR _SBF(1, 0x02)
144
145#define AES_KEY_LEN 16
146#define CRYPTO_QUEUE_LEN 1
147
148struct s5p_aes_reqctx {
149 unsigned long mode;
150};
151
152struct s5p_aes_ctx {
153 struct s5p_aes_dev *dev;
154
155 uint8_t aes_key[AES_MAX_KEY_SIZE];
156 uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
157 int keylen;
158};
159
160struct s5p_aes_dev {
161 struct device *dev;
162 struct clk *clk;
163 void __iomem *ioaddr;
164 int irq_hash;
165 int irq_fc;
166
167 struct ablkcipher_request *req;
168 struct s5p_aes_ctx *ctx;
169 struct scatterlist *sg_src;
170 struct scatterlist *sg_dst;
171
172 struct tasklet_struct tasklet;
173 struct crypto_queue queue;
174 bool busy;
175 spinlock_t lock;
176};
177
178static struct s5p_aes_dev *s5p_dev;
179
180static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
181{
182 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
183 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
184}
185
186static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
187{
188 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
189 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
190}
191
192static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
193{
194	/* the caller is expected to hold dev->lock */
195 dev->req->base.complete(&dev->req->base, err);
196 dev->busy = false;
197}
198
199static void s5p_unset_outdata(struct s5p_aes_dev *dev)
200{
201 dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
202}
203
204static void s5p_unset_indata(struct s5p_aes_dev *dev)
205{
206 dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
207}
208
209static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
210{
211 int err;
212
213 if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
214 err = -EINVAL;
215 goto exit;
216 }
217 if (!sg_dma_len(sg)) {
218 err = -EINVAL;
219 goto exit;
220 }
221
222 err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
223 if (!err) {
224 err = -ENOMEM;
225 goto exit;
226 }
227
228 dev->sg_dst = sg;
229 err = 0;
230
231 exit:
232 return err;
233}
234
235static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
236{
237 int err;
238
239 if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
240 err = -EINVAL;
241 goto exit;
242 }
243 if (!sg_dma_len(sg)) {
244 err = -EINVAL;
245 goto exit;
246 }
247
248 err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
249 if (!err) {
250 err = -ENOMEM;
251 goto exit;
252 }
253
254 dev->sg_src = sg;
255 err = 0;
256
257 exit:
258 return err;
259}
260
261static void s5p_aes_tx(struct s5p_aes_dev *dev)
262{
263 int err = 0;
264
265 s5p_unset_outdata(dev);
266
267 if (!sg_is_last(dev->sg_dst)) {
268 err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
269 if (err) {
270 s5p_aes_complete(dev, err);
271 return;
272 }
273
274 s5p_set_dma_outdata(dev, dev->sg_dst);
275 } else
276 s5p_aes_complete(dev, err);
277}
278
279static void s5p_aes_rx(struct s5p_aes_dev *dev)
280{
281 int err;
282
283 s5p_unset_indata(dev);
284
285 if (!sg_is_last(dev->sg_src)) {
286 err = s5p_set_indata(dev, sg_next(dev->sg_src));
287 if (err) {
288 s5p_aes_complete(dev, err);
289 return;
290 }
291
292 s5p_set_dma_indata(dev, dev->sg_src);
293 }
294}
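
s5p_aes_rx()/s5p_aes_tx() above retire one scatterlist entry per interrupt and program the next until sg_is_last(). A mock walk of that per-chunk DMA scheme (toy sg type, not the kernel scatterlist API):

#include <stdio.h>
#include <stddef.h>

struct sg { size_t len; struct sg *next; };

static int is_last(struct sg *s)      { return !s->next; }
static void program_dma(struct sg *s) { printf("dma %zu bytes\n", s->len); }

static void on_chunk_done(struct sg **cur, int *done)
{
	if (is_last(*cur)) {    /* whole request finished: complete it */
		*done = 1;
		return;
	}
	*cur = (*cur)->next;    /* advance and queue the next chunk */
	program_dma(*cur);
}

int main(void)
{
	struct sg c = { 32, NULL }, b = { 64, &c }, a = { 16, &b };
	struct sg *cur = &a;
	int done = 0;

	program_dma(cur);       /* kick off the first chunk */
	while (!done)
		on_chunk_done(&cur, &done);
	return 0;
}
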
295
296static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
297{
298 struct platform_device *pdev = dev_id;
299 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
300 uint32_t status;
301 unsigned long flags;
302
303 spin_lock_irqsave(&dev->lock, flags);
304
305 if (irq == dev->irq_fc) {
306 status = SSS_READ(dev, FCINTSTAT);
307 if (status & SSS_FCINTSTAT_BRDMAINT)
308 s5p_aes_rx(dev);
309 if (status & SSS_FCINTSTAT_BTDMAINT)
310 s5p_aes_tx(dev);
311
312 SSS_WRITE(dev, FCINTPEND, status);
313 }
314
315 spin_unlock_irqrestore(&dev->lock, flags);
316
317 return IRQ_HANDLED;
318}
319
320static void s5p_set_aes(struct s5p_aes_dev *dev,
321 uint8_t *key, uint8_t *iv, unsigned int keylen)
322{
323 void __iomem *keystart;
324
325 memcpy(dev->ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
326
327 if (keylen == AES_KEYSIZE_256)
328 keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(0);
329 else if (keylen == AES_KEYSIZE_192)
330 keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(2);
331 else
332 keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(4);
333
334 memcpy(keystart, key, keylen);
335}
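
s5p_set_aes() picks the key start register by key length: a 256-bit key begins at word 0, 192-bit at word 2, 128-bit at word 4, so every key ends at the same top word of the 8-word key file (a right-alignment reading inferred from the offsets, not documented here). A standalone check of those offsets:

#include <stdio.h>

#define KEY_DATA(s)	(0x4080 + ((s) << 2))   /* copied from the defines above */

int main(void)
{
	int keylens[] = { 16, 24, 32 };  /* AES-128/192/256 in bytes */

	for (int i = 0; i < 3; i++) {
		int start_word = 8 - keylens[i] / 4;  /* right-align in 8 words */

		printf("keylen=%2d -> start 0x%04x, end 0x%04x\n",
		       keylens[i], KEY_DATA(start_word),
		       KEY_DATA(start_word) + keylens[i]);
	}
	return 0;  /* 16 -> 0x4090..0x40a0, 24 -> 0x4088.., 32 -> 0x4080.. */
}
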
336
337static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
338{
339 struct ablkcipher_request *req = dev->req;
340
341 uint32_t aes_control;
342 int err;
343 unsigned long flags;
344
345 aes_control = SSS_AES_KEY_CHANGE_MODE;
346 if (mode & FLAGS_AES_DECRYPT)
347 aes_control |= SSS_AES_MODE_DECRYPT;
348
349 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
350 aes_control |= SSS_AES_CHAIN_MODE_CBC;
351 else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
352 aes_control |= SSS_AES_CHAIN_MODE_CTR;
353
354 if (dev->ctx->keylen == AES_KEYSIZE_192)
355 aes_control |= SSS_AES_KEY_SIZE_192;
356 else if (dev->ctx->keylen == AES_KEYSIZE_256)
357 aes_control |= SSS_AES_KEY_SIZE_256;
358
359 aes_control |= SSS_AES_FIFO_MODE;
360
361	/* alternatively, byte swapping could be done on the DMA side */
362 aes_control |= SSS_AES_BYTESWAP_DI
363 | SSS_AES_BYTESWAP_DO
364 | SSS_AES_BYTESWAP_IV
365 | SSS_AES_BYTESWAP_KEY
366 | SSS_AES_BYTESWAP_CNT;
367
368 spin_lock_irqsave(&dev->lock, flags);
369
370 SSS_WRITE(dev, FCINTENCLR,
371 SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
372 SSS_WRITE(dev, FCFIFOCTRL, 0x00);
373
374 err = s5p_set_indata(dev, req->src);
375 if (err)
376 goto indata_error;
377
378 err = s5p_set_outdata(dev, req->dst);
379 if (err)
380 goto outdata_error;
381
382 SSS_WRITE(dev, AES_CONTROL, aes_control);
383 s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
384
385 s5p_set_dma_indata(dev, req->src);
386 s5p_set_dma_outdata(dev, req->dst);
387
388 SSS_WRITE(dev, FCINTENSET,
389 SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
390
391 spin_unlock_irqrestore(&dev->lock, flags);
392
393 return;
394
395 outdata_error:
396 s5p_unset_indata(dev);
397
398 indata_error:
399 s5p_aes_complete(dev, err);
400 spin_unlock_irqrestore(&dev->lock, flags);
401}
402
403static void s5p_tasklet_cb(unsigned long data)
404{
405 struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
406 struct crypto_async_request *async_req, *backlog;
407 struct s5p_aes_reqctx *reqctx;
408 unsigned long flags;
409
410 spin_lock_irqsave(&dev->lock, flags);
411 backlog = crypto_get_backlog(&dev->queue);
412 async_req = crypto_dequeue_request(&dev->queue);
413 spin_unlock_irqrestore(&dev->lock, flags);
414
415 if (!async_req)
416 return;
417
418 if (backlog)
419 backlog->complete(backlog, -EINPROGRESS);
420
421 dev->req = ablkcipher_request_cast(async_req);
422 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
423 reqctx = ablkcipher_request_ctx(dev->req);
424
425 s5p_aes_crypt_start(dev, reqctx->mode);
426}
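
s5p_aes_handle_req() only marks the device busy and enqueues; the hardware is programmed later from tasklet context in s5p_tasklet_cb(). A user-space sketch of that handoff, with a pthread mutex and thread standing in for the spinlock and tasklet (illustrative only; the real driver clears busy in s5p_aes_complete()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int busy, queued;

static int handle_req(void)
{
	pthread_mutex_lock(&lock);
	if (busy) {                  /* device occupied: bounce the caller */
		pthread_mutex_unlock(&lock);
		return -11;          /* -EAGAIN */
	}
	busy = 1;
	queued++;
	pthread_mutex_unlock(&lock);
	/* tasklet_schedule(&dev->tasklet) would go here */
	return 0;
}

static void *tasklet(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (queued) {
		queued--;
		printf("programming hardware for one request\n");
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	printf("first: %d, second: %d\n", handle_req(), handle_req());
	pthread_create(&t, NULL, tasklet, NULL);  /* deferred work */
	pthread_join(t, NULL);
	return 0;
}
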
427
428static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
429 struct ablkcipher_request *req)
430{
431 unsigned long flags;
432 int err;
433
434 spin_lock_irqsave(&dev->lock, flags);
435 if (dev->busy) {
436 err = -EAGAIN;
437 spin_unlock_irqrestore(&dev->lock, flags);
438 goto exit;
439 }
440 dev->busy = true;
441
442 err = ablkcipher_enqueue_request(&dev->queue, req);
443 spin_unlock_irqrestore(&dev->lock, flags);
444
445 tasklet_schedule(&dev->tasklet);
446
447 exit:
448 return err;
449}
450
451static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
452{
453 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
454 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
455 struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
456 struct s5p_aes_dev *dev = ctx->dev;
457
458 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
459		pr_err("request size is not a multiple of the AES block size\n");
460 return -EINVAL;
461 }
462
463 reqctx->mode = mode;
464
465 return s5p_aes_handle_req(dev, req);
466}
467
468static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
469 const uint8_t *key, unsigned int keylen)
470{
471 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
472 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
473
474 if (keylen != AES_KEYSIZE_128 &&
475 keylen != AES_KEYSIZE_192 &&
476 keylen != AES_KEYSIZE_256)
477 return -EINVAL;
478
479 memcpy(ctx->aes_key, key, keylen);
480 ctx->keylen = keylen;
481
482 return 0;
483}
484
485static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
486{
487 return s5p_aes_crypt(req, 0);
488}
489
490static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
491{
492 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
493}
494
495static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
496{
497 return s5p_aes_crypt(req, FLAGS_AES_CBC);
498}
499
500static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
501{
502 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
503}
504
505static int s5p_aes_cra_init(struct crypto_tfm *tfm)
506{
507 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
508
509 ctx->dev = s5p_dev;
510 tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
511
512 return 0;
513}
514
515static struct crypto_alg algs[] = {
516 {
517 .cra_name = "ecb(aes)",
518 .cra_driver_name = "ecb-aes-s5p",
519 .cra_priority = 100,
520 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
521 CRYPTO_ALG_ASYNC,
522 .cra_blocksize = AES_BLOCK_SIZE,
523 .cra_ctxsize = sizeof(struct s5p_aes_ctx),
524 .cra_alignmask = 0x0f,
525 .cra_type = &crypto_ablkcipher_type,
526 .cra_module = THIS_MODULE,
527 .cra_init = s5p_aes_cra_init,
528 .cra_u.ablkcipher = {
529 .min_keysize = AES_MIN_KEY_SIZE,
530 .max_keysize = AES_MAX_KEY_SIZE,
531 .setkey = s5p_aes_setkey,
532 .encrypt = s5p_aes_ecb_encrypt,
533 .decrypt = s5p_aes_ecb_decrypt,
534 }
535 },
536 {
537 .cra_name = "cbc(aes)",
538 .cra_driver_name = "cbc-aes-s5p",
539 .cra_priority = 100,
540 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
541 CRYPTO_ALG_ASYNC,
542 .cra_blocksize = AES_BLOCK_SIZE,
543 .cra_ctxsize = sizeof(struct s5p_aes_ctx),
544 .cra_alignmask = 0x0f,
545 .cra_type = &crypto_ablkcipher_type,
546 .cra_module = THIS_MODULE,
547 .cra_init = s5p_aes_cra_init,
548 .cra_u.ablkcipher = {
549 .min_keysize = AES_MIN_KEY_SIZE,
550 .max_keysize = AES_MAX_KEY_SIZE,
551 .ivsize = AES_BLOCK_SIZE,
552 .setkey = s5p_aes_setkey,
553 .encrypt = s5p_aes_cbc_encrypt,
554 .decrypt = s5p_aes_cbc_decrypt,
555 }
556 },
557};
558
559static int s5p_aes_probe(struct platform_device *pdev)
560{
561 int i, j, err = -ENODEV;
562 struct s5p_aes_dev *pdata;
563 struct device *dev = &pdev->dev;
564 struct resource *res;
565
566 if (s5p_dev)
567 return -EEXIST;
568
569 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
570 if (!res)
571 return -ENODEV;
572
573 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
574 if (!pdata)
575 return -ENOMEM;
576
577 if (!devm_request_mem_region(dev, res->start,
578 resource_size(res), pdev->name))
579 return -EBUSY;
580
581 pdata->clk = clk_get(dev, "secss");
582 if (IS_ERR(pdata->clk)) {
583 dev_err(dev, "failed to find secss clock source\n");
584 return -ENOENT;
585 }
586
587 clk_enable(pdata->clk);
588
589 spin_lock_init(&pdata->lock);
590 pdata->ioaddr = devm_ioremap(dev, res->start,
591 resource_size(res));
592
593 pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
594 if (pdata->irq_hash < 0) {
595 err = pdata->irq_hash;
596 dev_warn(dev, "hash interrupt is not available.\n");
597 goto err_irq;
598 }
599 err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
600 IRQF_SHARED, pdev->name, pdev);
601 if (err < 0) {
602 dev_warn(dev, "hash interrupt is not available.\n");
603 goto err_irq;
604 }
605
606 pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
607 if (pdata->irq_fc < 0) {
608 err = pdata->irq_fc;
609 dev_warn(dev, "feed control interrupt is not available.\n");
610 goto err_irq;
611 }
612 err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
613 IRQF_SHARED, pdev->name, pdev);
614 if (err < 0) {
615 dev_warn(dev, "feed control interrupt is not available.\n");
616 goto err_irq;
617 }
618
619 pdata->dev = dev;
620 platform_set_drvdata(pdev, pdata);
621 s5p_dev = pdata;
622
623 tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
624 crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
625
626 for (i = 0; i < ARRAY_SIZE(algs); i++) {
627 INIT_LIST_HEAD(&algs[i].cra_list);
628 err = crypto_register_alg(&algs[i]);
629 if (err)
630 goto err_algs;
631 }
632
633 pr_info("s5p-sss driver registered\n");
634
635 return 0;
636
637 err_algs:
638 dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
639
640 for (j = 0; j < i; j++)
641 crypto_unregister_alg(&algs[j]);
642
643 tasklet_kill(&pdata->tasklet);
644
645 err_irq:
646 clk_disable(pdata->clk);
647 clk_put(pdata->clk);
648
649 s5p_dev = NULL;
650 platform_set_drvdata(pdev, NULL);
651
652 return err;
653}
654
655static int s5p_aes_remove(struct platform_device *pdev)
656{
657 struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
658 int i;
659
660 if (!pdata)
661 return -ENODEV;
662
663 for (i = 0; i < ARRAY_SIZE(algs); i++)
664 crypto_unregister_alg(&algs[i]);
665
666 tasklet_kill(&pdata->tasklet);
667
668 clk_disable(pdata->clk);
669 clk_put(pdata->clk);
670
671 s5p_dev = NULL;
672 platform_set_drvdata(pdev, NULL);
673
674 return 0;
675}
676
677static struct platform_driver s5p_aes_crypto = {
678 .probe = s5p_aes_probe,
679 .remove = s5p_aes_remove,
680 .driver = {
681 .owner = THIS_MODULE,
682 .name = "s5p-secss",
683 },
684};
685
686static int __init s5p_aes_mod_init(void)
687{
688 return platform_driver_register(&s5p_aes_crypto);
689}
690
691static void __exit s5p_aes_mod_exit(void)
692{
693 platform_driver_unregister(&s5p_aes_crypto);
694}
695
696module_init(s5p_aes_mod_init);
697module_exit(s5p_aes_mod_exit);
698
699MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
700MODULE_LICENSE("GPL v2");
701MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");