 108 files changed, 20473 insertions(+), 583 deletions(-)
diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
new file mode 100644
index 000000000000..8c61183b41e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -0,0 +1,19 @@
+* AMD Cryptographic Coprocessor driver (ccp)
+
+Required properties:
+- compatible: Should be "amd,ccp-seattle-v1a"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the CCP interrupt
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+
+Example:
+	ccp@e0100000 {
+		compatible = "amd,ccp-seattle-v1a";
+		reg = <0 0xe0100000 0 0x10000>;
+		interrupt-parent = <&gic>;
+		interrupts = <0 3 4>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.txt b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
new file mode 100644
index 000000000000..fdd53b184ba8
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
@@ -0,0 +1,25 @@
+Qualcomm crypto engine driver
+
+Required properties:
+
+- compatible  : should be "qcom,crypto-v5.1"
+- reg         : specifies base physical address and size of the registers map
+- clocks      : phandle to clock-controller plus clock-specifier pair
+- clock-names : "iface" clocks register interface
+                "bus" clocks data transfer interface
+                "core" clocks rest of the crypto block
+- dmas        : DMA specifiers for tx and rx dma channels. For more see
+                Documentation/devicetree/bindings/dma/dma.txt
+- dma-names   : DMA request names should be "rx" and "tx"
+
+Example:
+	crypto@fd45a000 {
+		compatible = "qcom,crypto-v5.1";
+		reg = <0xfd45a000 0x6000>;
+		clocks = <&gcc GCC_CE2_AHB_CLK>,
+			 <&gcc GCC_CE2_AXI_CLK>,
+			 <&gcc GCC_CE2_CLK>;
+		clock-names = "iface", "bus", "core";
+		dmas = <&cryptobam 2>, <&cryptobam 3>;
+		dma-names = "rx", "tx";
+	};
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index d7e43fa88575..7e240a7c9ab1 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -197,6 +197,7 @@ Code	Seq#(hex)	Include File	Comments
 					<mailto:gregkh@linuxfoundation.org>
 'a'	all	linux/atm*.h, linux/sonet.h	ATM on linux
 					<http://lrcwww.epfl.ch/>
+'a'	00-0F	drivers/crypto/qat/qat_common/adf_cfg_common.h	conflict! qat driver
 'b'	00-FF				conflict! bit3 vme host bridge
 					<mailto:natalia@nikhefk.nikhef.nl>
 'c'	all	linux/cm4000_cs.h	conflict!
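
For orientation, an entry in this registry corresponds to _IO*() macro
definitions in the named header. A sketch of how a driver might claim the
reserved 'a'/00-0F window (the identifiers below are invented for
illustration, not taken from the qat header):

	#include <linux/ioctl.h>

	#define ADF_CTL_IOC_MAGIC	'a'	/* the registered code */

	/* sequence numbers must stay inside the reserved 00-0F window */
	#define IOCTL_EXAMPLE_RESET	_IO(ADF_CTL_IOC_MAGIC, 0x00)
	#define IOCTL_EXAMPLE_STATUS	_IOR(ADF_CTL_IOC_MAGIC, 0x01, int)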
diff --git a/MAINTAINERS b/MAINTAINERS
index 4f05c699daf8..f167eb17b5e4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7250,6 +7250,12 @@ M:	Robert Jarzmik <robert.jarzmik@free.fr>
 L:	rtc-linux@googlegroups.com
 S:	Maintained
 
+QAT DRIVER
+M:	Tadeusz Struk <tadeusz.struk@intel.com>
+L:	qat-linux@intel.com
+S:	Supported
+F:	drivers/crypto/qat/
+
 QIB DRIVER
 M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
index f75b4f820c3c..7d4a6a2354f4 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
@@ -32,7 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-	compatible = "fsl,sec-v6.0";
+	compatible = "fsl,sec-v6.0", "fsl,sec-v5.0",
+		     "fsl,sec-v4.0";
 	fsl,sec-era = <6>;
 	#address-cells = <1>;
 	#size-cells = <1>;
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 61d6e281898b..d551165a3159 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
 
 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
 obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
 obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
@@ -52,6 +53,7 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
 serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
 
 aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
+des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
 camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
 blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
@@ -76,7 +78,7 @@ ifeq ($(avx2_supported),yes)
 endif
 
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
-aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o
+aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
new file mode 100644
index 000000000000..f091f122ed24
--- /dev/null
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -0,0 +1,546 @@
+/*
+ * Implement AES CTR mode by8 optimization with AVX instructions. (x86_64)
+ *
+ * This is AES128/192/256 CTR mode optimization implementation. It requires
+ * the support of Intel(R) AESNI and AVX instructions.
+ *
+ * This work was inspired by the AES CTR mode optimization published
+ * in Intel Optimized IPSEC Cryptographic library.
+ * Additional information on it can be found at:
+ * http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * James Guilford <james.guilford@intel.com>
+ * Sean Gulley <sean.m.gulley@intel.com>
+ * Chandramouli Narayanan <mouli@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/inst.h>
+
+#define CONCAT(a,b)	a##b
+#define VMOVDQ		vmovdqu
+
+#define xdata0		%xmm0
+#define xdata1		%xmm1
+#define xdata2		%xmm2
+#define xdata3		%xmm3
+#define xdata4		%xmm4
+#define xdata5		%xmm5
+#define xdata6		%xmm6
+#define xdata7		%xmm7
+#define xcounter	%xmm8
+#define xbyteswap	%xmm9
+#define xkey0		%xmm10
+#define xkey3		%xmm11
+#define xkey6		%xmm12
+#define xkey9		%xmm13
+#define xkey4		%xmm11
+#define xkey8		%xmm12
+#define xkey12		%xmm13
+#define xkeyA		%xmm14
+#define xkeyB		%xmm15
+
+#define p_in		%rdi
+#define p_iv		%rsi
+#define p_keys		%rdx
+#define p_out		%rcx
+#define num_bytes	%r8
+
+#define tmp		%r10
+#define	DDQ(i)		CONCAT(ddq_add_,i)
+#define	XMM(i)		CONCAT(%xmm, i)
+#define	DDQ_DATA	0
+#define	XDATA		1
+#define KEY_128		1
+#define KEY_192		2
+#define KEY_256		3
+
+.section .rodata
+.align 16
+
+byteswap_const:
+	.octa 0x000102030405060708090A0B0C0D0E0F
+ddq_add_1:
+	.octa 0x00000000000000000000000000000001
+ddq_add_2:
+	.octa 0x00000000000000000000000000000002
+ddq_add_3:
+	.octa 0x00000000000000000000000000000003
+ddq_add_4:
+	.octa 0x00000000000000000000000000000004
+ddq_add_5:
+	.octa 0x00000000000000000000000000000005
+ddq_add_6:
+	.octa 0x00000000000000000000000000000006
+ddq_add_7:
+	.octa 0x00000000000000000000000000000007
+ddq_add_8:
+	.octa 0x00000000000000000000000000000008
+
+.text
+
+/* generate a unique variable for ddq_add_x */
+
+.macro setddq n
+	var_ddq_add = DDQ(\n)
+.endm
+
+/* generate a unique variable for xmm register */
+.macro setxdata n
+	var_xdata = XMM(\n)
+.endm
+
+/* club the numeric 'id' to the symbol 'name' */
+
+.macro club name, id
+.altmacro
+	.if \name == DDQ_DATA
+		setddq %\id
+	.elseif \name == XDATA
+		setxdata %\id
+	.endif
+.noaltmacro
+.endm
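+
+/*
+ * For illustration: "club XDATA, 3" leaves var_xdata standing for
+ * %xmm3, and "club DDQ_DATA, 3" leaves var_ddq_add standing for the
+ * ddq_add_3 constant above; .altmacro makes the numeric argument
+ * expand inside the symbol names.
+ */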
+
+/*
+ * do_aes num_in_par load_keys key_len
+ * This increments p_in, but not p_out
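+ *
+ * Each invocation handles "by" (1 to 8) counter blocks: the counters
+ * derived from xcounter are byte-swapped, run through the AES round
+ * sequence for the chosen key length, and the resulting keystream is
+ * XORed with the "by" input blocks before the result is stored at
+ * p_out.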
+ */
+.macro do_aes b, k, key_len
+	.set by, \b
+	.set load_keys, \k
+	.set klen, \key_len
+
+	.if (load_keys)
+		vmovdqa	0*16(p_keys), xkey0
+	.endif
+
+	vpshufb	xbyteswap, xcounter, xdata0
+
+	.set i, 1
+	.rept (by - 1)
+		club DDQ_DATA, i
+		club XDATA, i
+		vpaddd	var_ddq_add(%rip), xcounter, var_xdata
+		vpshufb	xbyteswap, var_xdata, var_xdata
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	1*16(p_keys), xkeyA
+
+	vpxor	xkey0, xdata0, xdata0
+	club DDQ_DATA, by
+	vpaddd	var_ddq_add(%rip), xcounter, xcounter
+
+	.set i, 1
+	.rept (by - 1)
+		club XDATA, i
+		vpxor	xkey0, var_xdata, var_xdata
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	2*16(p_keys), xkeyB
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 1 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen == KEY_128)
+		.if (load_keys)
+			vmovdqa	3*16(p_keys), xkeyA
+		.endif
+	.else
+		vmovdqa	3*16(p_keys), xkeyA
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyB, var_xdata, var_xdata		/* key 2 */
+		.set i, (i +1)
+	.endr
+
+	add	$(16*by), p_in
+
+	.if (klen == KEY_128)
+		vmovdqa	4*16(p_keys), xkey4
+	.else
+		.if (load_keys)
+			vmovdqa	4*16(p_keys), xkey4
+		.endif
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 3 */
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	5*16(p_keys), xkeyA
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkey4, var_xdata, var_xdata		/* key 4 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen == KEY_128)
+		.if (load_keys)
+			vmovdqa	6*16(p_keys), xkeyB
+		.endif
+	.else
+		vmovdqa	6*16(p_keys), xkeyB
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 5 */
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	7*16(p_keys), xkeyA
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyB, var_xdata, var_xdata		/* key 6 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen == KEY_128)
+		vmovdqa	8*16(p_keys), xkey8
+	.else
+		.if (load_keys)
+			vmovdqa	8*16(p_keys), xkey8
+		.endif
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 7 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen == KEY_128)
+		.if (load_keys)
+			vmovdqa	9*16(p_keys), xkeyA
+		.endif
+	.else
+		vmovdqa	9*16(p_keys), xkeyA
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkey8, var_xdata, var_xdata		/* key 8 */
+		.set i, (i +1)
+	.endr
+
+	vmovdqa	10*16(p_keys), xkeyB
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		vaesenc	xkeyA, var_xdata, var_xdata		/* key 9 */
+		.set i, (i +1)
+	.endr
+
+	.if (klen != KEY_128)
+		vmovdqa	11*16(p_keys), xkeyA
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		/* key 10 */
+		.if (klen == KEY_128)
+			vaesenclast xkeyB, var_xdata, var_xdata
+		.else
+			vaesenc	xkeyB, var_xdata, var_xdata
+		.endif
+		.set i, (i +1)
+	.endr
+
+	.if (klen != KEY_128)
+		.if (load_keys)
+			vmovdqa	12*16(p_keys), xkey12
+		.endif
+
+		.set i, 0
+		.rept by
+			club XDATA, i
+			vaesenc	xkeyA, var_xdata, var_xdata	/* key 11 */
+			.set i, (i +1)
+		.endr
+
+		.if (klen == KEY_256)
+			vmovdqa	13*16(p_keys), xkeyA
+		.endif
+
+		.set i, 0
+		.rept by
+			club XDATA, i
+			.if (klen == KEY_256)
+				/* key 12 */
+				vaesenc	xkey12, var_xdata, var_xdata
+			.else
+				vaesenclast xkey12, var_xdata, var_xdata
+			.endif
+			.set i, (i +1)
+		.endr
+
+		.if (klen == KEY_256)
+			vmovdqa	14*16(p_keys), xkeyB
+
+			.set i, 0
+			.rept by
+				club XDATA, i
+				/* key 13 */
+				vaesenc	xkeyA, var_xdata, var_xdata
+				.set i, (i +1)
+			.endr
+
+			.set i, 0
+			.rept by
+				club XDATA, i
+				/* key 14 */
+				vaesenclast xkeyB, var_xdata, var_xdata
+				.set i, (i +1)
+			.endr
+		.endif
+	.endif
+
+	.set i, 0
+	.rept (by / 2)
+		.set j, (i+1)
+		VMOVDQ	(i*16 - 16*by)(p_in), xkeyA
+		VMOVDQ	(j*16 - 16*by)(p_in), xkeyB
+		club XDATA, i
+		vpxor	xkeyA, var_xdata, var_xdata
+		club XDATA, j
+		vpxor	xkeyB, var_xdata, var_xdata
+		.set i, (i+2)
+	.endr
+
+	.if (i < by)
+		VMOVDQ	(i*16 - 16*by)(p_in), xkeyA
+		club XDATA, i
+		vpxor	xkeyA, var_xdata, var_xdata
+	.endif
+
+	.set i, 0
+	.rept by
+		club XDATA, i
+		VMOVDQ	var_xdata, i*16(p_out)
+		.set i, (i+1)
+	.endr
+.endm
+
+.macro do_aes_load val, key_len
+	do_aes	\val, 1, \key_len
+.endm
+
+.macro do_aes_noload val, key_len
+	do_aes	\val, 0, \key_len
+.endm
+
+/* main body of aes ctr load */
+
+.macro do_aes_ctrmain key_len
+
+	cmp	$16, num_bytes
+	jb	.Ldo_return2\key_len
+
+	vmovdqa	byteswap_const(%rip), xbyteswap
+	vmovdqu	(p_iv), xcounter
+	vpshufb	xbyteswap, xcounter, xcounter
+
+	mov	num_bytes, tmp
+	and	$(7*16), tmp
+	jz	.Lmult_of_8_blks\key_len
+
+	/* 1 to 7 whole blocks remain, i.e. tmp = 16..112 bytes */
+	cmp	$(4*16), tmp
+	jg	.Lgt4\key_len
+	je	.Leq4\key_len
+
+.Llt4\key_len:
+	cmp	$(2*16), tmp
+	jg	.Leq3\key_len
+	je	.Leq2\key_len
+
+.Leq1\key_len:
+	do_aes_load	1, \key_len
+	add	$(1*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Leq2\key_len:
+	do_aes_load	2, \key_len
+	add	$(2*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+
+.Leq3\key_len:
+	do_aes_load	3, \key_len
+	add	$(3*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Leq4\key_len:
+	do_aes_load	4, \key_len
+	add	$(4*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Lgt4\key_len:
+	cmp	$(6*16), tmp
+	jg	.Leq7\key_len
+	je	.Leq6\key_len
+
+.Leq5\key_len:
+	do_aes_load	5, \key_len
+	add	$(5*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Leq6\key_len:
+	do_aes_load	6, \key_len
+	add	$(6*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Leq7\key_len:
+	do_aes_load	7, \key_len
+	add	$(7*16), p_out
+	and	$(~7*16), num_bytes
+	jz	.Ldo_return2\key_len
+	jmp	.Lmain_loop2\key_len
+
+.Lmult_of_8_blks\key_len:
+	.if (\key_len != KEY_128)
+		vmovdqa	0*16(p_keys), xkey0
+		vmovdqa	4*16(p_keys), xkey4
+		vmovdqa	8*16(p_keys), xkey8
+		vmovdqa	12*16(p_keys), xkey12
+	.else
+		vmovdqa	0*16(p_keys), xkey0
+		vmovdqa	3*16(p_keys), xkey4
+		vmovdqa	6*16(p_keys), xkey8
+		vmovdqa	9*16(p_keys), xkey12
+	.endif
+.align 16
+.Lmain_loop2\key_len:
+	/* num_bytes is a multiple of 8 blocks (128 bytes) and > 0 */
+	do_aes_noload	8, \key_len
+	add	$(8*16), p_out
+	sub	$(8*16), num_bytes
+	jne	.Lmain_loop2\key_len
+
+.Ldo_return2\key_len:
+	/* return updated IV */
+	vpshufb	xbyteswap, xcounter, xcounter
+	vmovdqu	xcounter, (p_iv)
+	ret
+.endm
+
+/*
+ * routine to do AES128 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out,
+ *			unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_128_avx_by8)
+	/* call the aes main loop */
+	do_aes_ctrmain KEY_128
+
+ENDPROC(aes_ctr_enc_128_avx_by8)
+
+/*
+ * routine to do AES192 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out,
+ *			unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_192_avx_by8)
+	/* call the aes main loop */
+	do_aes_ctrmain KEY_192
+
+ENDPROC(aes_ctr_enc_192_avx_by8)
+
+/*
+ * routine to do AES256 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out,
+ *			unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_256_avx_by8)
+	/* call the aes main loop */
+	do_aes_ctrmain KEY_256
+
+ENDPROC(aes_ctr_enc_256_avx_by8)
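
Taken together, the three ENTRY points above form a keystream generator:
they encrypt a run of incrementing counter blocks and XOR the result over
the input, eight blocks per main-loop pass. The C below is a minimal model
of that data flow, not the implementation itself; aes_encrypt_block() is a
hypothetical stand-in for the unrolled AES-NI/AVX rounds, and partial
final blocks are left to the caller, as in the glue code that follows.

	#include <stdint.h>

	/* Hypothetical single-block primitive standing in for the
	 * unrolled AES-NI/AVX round sequence. */
	void aes_encrypt_block(const void *key_schedule,
			       const uint8_t in[16], uint8_t out[16]);

	/* Big-endian counter increment, mirroring the vpshufb byte swap
	 * plus ddq_add_* additions in the assembly above. */
	static void ctr128_inc(uint8_t ctr[16])
	{
		int i;

		for (i = 15; i >= 0; i--)
			if (++ctr[i] != 0)
				break;
	}

	/* Reference flow: encrypt successive counters, XOR the keystream
	 * over the input. The assembly simply materializes eight
	 * counters per loop iteration. */
	static void ctr_by8_model(const void *ks, uint8_t iv[16],
				  const uint8_t *in, uint8_t *out,
				  unsigned int num_bytes)
	{
		uint8_t keystream[16];
		unsigned int i;

		while (num_bytes >= 16) {
			aes_encrypt_block(ks, iv, keystream);
			for (i = 0; i < 16; i++)
				out[i] = in[i] ^ keystream[i];
			ctr128_inc(iv);
			in += 16;
			out += 16;
			num_bytes -= 16;
		}
	}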
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 948ad0e77741..888950f29fd9 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -105,6 +105,9 @@ void crypto_fpu_exit(void);
 #define AVX_GEN4_OPTSIZE 4096
 
 #ifdef CONFIG_X86_64
+
+static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
@@ -155,6 +158,12 @@ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
 
 
 #ifdef CONFIG_AS_AVX
+asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
+		void *keys, u8 *out, unsigned int num_bytes);
+asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
+		void *keys, u8 *out, unsigned int num_bytes);
+asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
+		void *keys, u8 *out, unsigned int num_bytes);
 /*
  * asmlinkage void aesni_gcm_precomp_avx_gen2()
  * gcm_data *my_ctx_data, context data
@@ -472,6 +481,25 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
+#ifdef CONFIG_AS_AVX
+static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len, u8 *iv)
+{
+	/*
+	 * based on key length, override with the by8 version
+	 * of ctr mode encryption/decryption for improved performance
+	 * aes_set_key_common() ensures that key length is one of
+	 * {128,192,256}
+	 */
+	if (ctx->key_length == AES_KEYSIZE_128)
+		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
+	else if (ctx->key_length == AES_KEYSIZE_192)
+		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
+	else
+		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
+}
+#endif
+
 static int ctr_crypt(struct blkcipher_desc *desc,
 		     struct scatterlist *dst, struct scatterlist *src,
 		     unsigned int nbytes)
@@ -486,8 +514,8 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-			      nbytes & AES_BLOCK_MASK, walk.iv);
+		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+			      nbytes & AES_BLOCK_MASK, walk.iv);
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
@@ -1493,6 +1521,14 @@ static int __init aesni_init(void)
 		aesni_gcm_enc_tfm = aesni_gcm_enc;
 		aesni_gcm_dec_tfm = aesni_gcm_dec;
 	}
+	aesni_ctr_enc_tfm = aesni_ctr_enc;
+#ifdef CONFIG_AS_AVX
+	if (cpu_has_avx) {
+		/* optimize performance of ctr mode encryption transform */
+		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
+		pr_info("AES CTR mode by8 optimization enabled\n");
+	}
+#endif
 #endif
 
 	err = crypto_fpu_init();
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index dbc4339b5417..26d49ebae040 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -72,6 +72,7 @@
 
 # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
 
+.text
 ENTRY(crc_pcl)
 #define    bufp		%rdi
 #define    bufp_dw	%edi
@@ -216,15 +217,11 @@ LABEL crc_ %i
 	## 4) Combine three results:
 	################################################################
 
-	lea	(K_table-16)(%rip), bufp	# first entry is for idx 1
+	lea	(K_table-8)(%rip), bufp		# first entry is for idx 1
 	shlq	$3, %rax			# rax *= 8
-	subq	%rax, tmp			# tmp -= rax*8
-	shlq	$1, %rax
-	subq	%rax, tmp			# tmp -= rax*16
-						# (total tmp -= rax*24)
-	addq	%rax, bufp
-
-	movdqa	(bufp), %xmm0			# 2 consts: K1:K2
+	pmovzxdq (bufp,%rax), %xmm0		# 2 consts: K1:K2
+	leal	(%eax,%eax,2), %eax		# rax *= 3 (total *24)
+	subq	%rax, tmp			# tmp -= rax*24
 
 	movq	crc_init, %xmm1			# CRC for block 1
 	PCLMULQDQ 0x00,%xmm0,%xmm1		# Multiply by K2
@@ -238,9 +235,9 @@ LABEL crc_ %i
 	mov	crc2, crc_init
 	crc32	%rax, crc_init
 
-################################################################
-## 5) Check for end:
-################################################################
+	################################################################
+	## 5) Check for end:
+	################################################################
 
 LABEL crc_ 0
 	mov	tmp, len
@@ -331,136 +328,136 @@ ENDPROC(crc_pcl)
 
 ################################################################
 ## PCLMULQDQ tables
-## Table is 128 entries x 2 quad words each
+## Table is 128 entries x 2 words (8 bytes) each
 ################################################################
-.data
+.section .rodata, "a", %progbits
-.align 64
+.align 8
 K_table:
-	.quad 0x14cd00bd6,0x105ec76f0
+	.long 0x493c7d27, 0x00000001
-	.quad 0x0ba4fc28e,0x14cd00bd6
+	.long 0xba4fc28e, 0x493c7d27
-	.quad 0x1d82c63da,0x0f20c0dfe
+	.long 0xddc0152b, 0xf20c0dfe
-	.quad 0x09e4addf8,0x0ba4fc28e
+	.long 0x9e4addf8, 0xba4fc28e
-	.quad 0x039d3b296,0x1384aa63a
+	.long 0x39d3b296, 0x3da6d0cb
-	.quad 0x102f9b8a2,0x1d82c63da
+	.long 0x0715ce53, 0xddc0152b
-	.quad 0x14237f5e6,0x01c291d04
+	.long 0x47db8317, 0x1c291d04
-	.quad 0x00d3b6092,0x09e4addf8
+	.long 0x0d3b6092, 0x9e4addf8
-	.quad 0x0c96cfdc0,0x0740eef02
+	.long 0xc96cfdc0, 0x740eef02
-	.quad 0x18266e456,0x039d3b296
+	.long 0x878a92a7, 0x39d3b296
-	.quad 0x0daece73e,0x0083a6eec
+	.long 0xdaece73e, 0x083a6eec
-	.quad 0x0ab7aff2a,0x102f9b8a2
+	.long 0xab7aff2a, 0x0715ce53
-	.quad 0x1248ea574,0x1c1733996
+	.long 0x2162d385, 0xc49f4f67
-	.quad 0x083348832,0x14237f5e6
+	.long 0x83348832, 0x47db8317
-	.quad 0x12c743124,0x02ad91c30
+	.long 0x299847d5, 0x2ad91c30
-	.quad 0x0b9e02b86,0x00d3b6092
+	.long 0xb9e02b86, 0x0d3b6092
-	.quad 0x018b33a4e,0x06992cea2
+	.long 0x18b33a4e, 0x6992cea2
-	.quad 0x1b331e26a,0x0c96cfdc0
+	.long 0xb6dd949b, 0xc96cfdc0
-	.quad 0x17d35ba46,0x07e908048
+	.long 0x78d9ccb7, 0x7e908048
-	.quad 0x1bf2e8b8a,0x18266e456
+	.long 0xbac2fd7b, 0x878a92a7
-	.quad 0x1a3e0968a,0x11ed1f9d8
+	.long 0xa60ce07b, 0x1b3d8f29
-	.quad 0x0ce7f39f4,0x0daece73e
+	.long 0xce7f39f4, 0xdaece73e
-	.quad 0x061d82e56,0x0f1d0f55e
+	.long 0x61d82e56, 0xf1d0f55e
-	.quad 0x0d270f1a2,0x0ab7aff2a
+	.long 0xd270f1a2, 0xab7aff2a
-	.quad 0x1c3f5f66c,0x0a87ab8a8
+	.long 0xc619809d, 0xa87ab8a8
-	.quad 0x12ed0daac,0x1248ea574
+	.long 0x2b3cac5d, 0x2162d385
-	.quad 0x065863b64,0x08462d800
+	.long 0x65863b64, 0x8462d800
-	.quad 0x11eef4f8e,0x083348832
+	.long 0x1b03397f, 0x83348832
-	.quad 0x1ee54f54c,0x071d111a8
+	.long 0xebb883bd, 0x71d111a8
-	.quad 0x0b3e32c28,0x12c743124
+	.long 0xb3e32c28, 0x299847d5
-	.quad 0x0064f7f26,0x0ffd852c6
+	.long 0x064f7f26, 0xffd852c6
-	.quad 0x0dd7e3b0c,0x0b9e02b86
+	.long 0xdd7e3b0c, 0xb9e02b86
-	.quad 0x0f285651c,0x0dcb17aa4
+	.long 0xf285651c, 0xdcb17aa4
-	.quad 0x010746f3c,0x018b33a4e
+	.long 0x10746f3c, 0x18b33a4e
-	.quad 0x1c24afea4,0x0f37c5aee
+	.long 0xc7a68855, 0xf37c5aee
-	.quad 0x0271d9844,0x1b331e26a
+	.long 0x271d9844, 0xb6dd949b
-	.quad 0x08e766a0c,0x06051d5a2
+	.long 0x8e766a0c, 0x6051d5a2
-	.quad 0x093a5f730,0x17d35ba46
+	.long 0x93a5f730, 0x78d9ccb7
-	.quad 0x06cb08e5c,0x11d5ca20e
+	.long 0x6cb08e5c, 0x18b0d4ff
-	.quad 0x06b749fb2,0x1bf2e8b8a
+	.long 0x6b749fb2, 0xbac2fd7b
-	.quad 0x1167f94f2,0x021f3d99c
+	.long 0x1393e203, 0x21f3d99c
-	.quad 0x0cec3662e,0x1a3e0968a
+	.long 0xcec3662e, 0xa60ce07b
-	.quad 0x19329634a,0x08f158014
+	.long 0x96c515bb, 0x8f158014
-	.quad 0x0e6fc4e6a,0x0ce7f39f4
+	.long 0xe6fc4e6a, 0xce7f39f4
-	.quad 0x08227bb8a,0x1a5e82106
+	.long 0x8227bb8a, 0xa00457f7
-	.quad 0x0b0cd4768,0x061d82e56
+	.long 0xb0cd4768, 0x61d82e56
-	.quad 0x13c2b89c4,0x188815ab2
+	.long 0x39c7ff35, 0x8d6d2c43
-	.quad 0x0d7a4825c,0x0d270f1a2
+	.long 0xd7a4825c, 0xd270f1a2
-	.quad 0x10f5ff2ba,0x105405f3e
+	.long 0x0ab3844b, 0x00ac29cf
-	.quad 0x00167d312,0x1c3f5f66c
+	.long 0x0167d312, 0xc619809d
-	.quad 0x0f6076544,0x0e9adf796
+	.long 0xf6076544, 0xe9adf796
-	.quad 0x026f6a60a,0x12ed0daac
+	.long 0x26f6a60a, 0x2b3cac5d
-	.quad 0x1a2adb74e,0x096638b34
+	.long 0xa741c1bf, 0x96638b34
-	.quad 0x19d34af3a,0x065863b64
+	.long 0x98d8d9cb, 0x65863b64
-	.quad 0x049c3cc9c,0x1e50585a0
+	.long 0x49c3cc9c, 0xe0e9f351
-	.quad 0x068bce87a,0x11eef4f8e
+	.long 0x68bce87a, 0x1b03397f
-	.quad 0x1524fa6c6,0x19f1c69dc
+	.long 0x57a3d037, 0x9af01f2d
-	.quad 0x16cba8aca,0x1ee54f54c
+	.long 0x6956fc3b, 0xebb883bd
-	.quad 0x042d98888,0x12913343e
+	.long 0x42d98888, 0x2cff42cf
-	.quad 0x1329d9f7e,0x0b3e32c28
+	.long 0x3771e98f, 0xb3e32c28
-	.quad 0x1b1c69528,0x088f25a3a
+	.long 0xb42ae3d9, 0x88f25a3a
-	.quad 0x02178513a,0x0064f7f26
+	.long 0x2178513a, 0x064f7f26
-	.quad 0x0e0ac139e,0x04e36f0b0
+	.long 0xe0ac139e, 0x4e36f0b0
-	.quad 0x0170076fa,0x0dd7e3b0c
+	.long 0x170076fa, 0xdd7e3b0c
-	.quad 0x141a1a2e2,0x0bd6f81f8
+	.long 0x444dd413, 0xbd6f81f8
-	.quad 0x16ad828b4,0x0f285651c
+	.long 0x6f345e45, 0xf285651c
-	.quad 0x041d17b64,0x19425cbba
+	.long 0x41d17b64, 0x91c9bd4b
-	.quad 0x1fae1cc66,0x010746f3c
+	.long 0xff0dba97, 0x10746f3c
-	.quad 0x1a75b4b00,0x18db37e8a
+	.long 0xa2b73df1, 0x885f087b
-	.quad 0x0f872e54c,0x1c24afea4
+	.long 0xf872e54c, 0xc7a68855
-	.quad 0x01e41e9fc,0x04c144932
+	.long 0x1e41e9fc, 0x4c144932
-	.quad 0x086d8e4d2,0x0271d9844
+	.long 0x86d8e4d2, 0x271d9844
-	.quad 0x160f7af7a,0x052148f02
+	.long 0x651bd98b, 0x52148f02
-	.quad 0x05bb8f1bc,0x08e766a0c
+	.long 0x5bb8f1bc, 0x8e766a0c
-	.quad 0x0a90fd27a,0x0a3c6f37a
+	.long 0xa90fd27a, 0xa3c6f37a
-	.quad 0x0b3af077a,0x093a5f730
+	.long 0xb3af077a, 0x93a5f730
-	.quad 0x04984d782,0x1d22c238e
+	.long 0x4984d782, 0xd7c0557f
-	.quad 0x0ca6ef3ac,0x06cb08e5c
+	.long 0xca6ef3ac, 0x6cb08e5c
-	.quad 0x0234e0b26,0x063ded06a
+	.long 0x234e0b26, 0x63ded06a
-	.quad 0x1d88abd4a,0x06b749fb2
+	.long 0xdd66cbbb, 0x6b749fb2
-	.quad 0x04597456a,0x04d56973c
+	.long 0x4597456a, 0x4d56973c
-	.quad 0x0e9e28eb4,0x1167f94f2
+	.long 0xe9e28eb4, 0x1393e203
-	.quad 0x07b3ff57a,0x19385bf2e
+	.long 0x7b3ff57a, 0x9669c9df
-	.quad 0x0c9c8b782,0x0cec3662e
+	.long 0xc9c8b782, 0xcec3662e
-	.quad 0x13a9cba9e,0x0e417f38a
+	.long 0x3f70cc6f, 0xe417f38a
-	.quad 0x093e106a4,0x19329634a
+	.long 0x93e106a4, 0x96c515bb
-	.quad 0x167001a9c,0x14e727980
+	.long 0x62ec6c6d, 0x4b9e0f71
-	.quad 0x1ddffc5d4,0x0e6fc4e6a
+	.long 0xd813b325, 0xe6fc4e6a
-	.quad 0x00df04680,0x0d104b8fc
+	.long 0x0df04680, 0xd104b8fc
-	.quad 0x02342001e,0x08227bb8a
+	.long 0x2342001e, 0x8227bb8a
-	.quad 0x00a2a8d7e,0x05b397730
+	.long 0x0a2a8d7e, 0x5b397730
-	.quad 0x168763fa6,0x0b0cd4768
+	.long 0x6d9a4957, 0xb0cd4768
-	.quad 0x1ed5a407a,0x0e78eb416
+	.long 0xe8b6368b, 0xe78eb416
-	.quad 0x0d2c3ed1a,0x13c2b89c4
+	.long 0xd2c3ed1a, 0x39c7ff35
-	.quad 0x0995a5724,0x1641378f0
+	.long 0x995a5724, 0x61ff0e01
-	.quad 0x19b1afbc4,0x0d7a4825c
+	.long 0x9ef68d35, 0xd7a4825c
-	.quad 0x109ffedc0,0x08d96551c
+	.long 0x0c139b31, 0x8d96551c
-	.quad 0x0f2271e60,0x10f5ff2ba
+	.long 0xf2271e60, 0x0ab3844b
-	.quad 0x00b0bf8ca,0x00bf80dd2
+	.long 0x0b0bf8ca, 0x0bf80dd2
-	.quad 0x123888b7a,0x00167d312
+	.long 0x2664fd8b, 0x0167d312
-	.quad 0x1e888f7dc,0x18dcddd1c
+	.long 0xed64812d, 0x8821abed
-	.quad 0x002ee03b2,0x0f6076544
+	.long 0x02ee03b2, 0xf6076544
-	.quad 0x183e8d8fe,0x06a45d2b2
+	.long 0x8604ae0f, 0x6a45d2b2
-	.quad 0x133d7a042,0x026f6a60a
+	.long 0x363bd6b3, 0x26f6a60a
-	.quad 0x116b0f50c,0x1dd3e10e8
+	.long 0x135c83fd, 0xd8d26619
-	.quad 0x05fabe670,0x1a2adb74e
+	.long 0x5fabe670, 0xa741c1bf
-	.quad 0x130004488,0x0de87806c
+	.long 0x35ec3279, 0xde87806c
-	.quad 0x000bcf5f6,0x19d34af3a
+	.long 0x00bcf5f6, 0x98d8d9cb
-	.quad 0x18f0c7078,0x014338754
+	.long 0x8ae00689, 0x14338754
-	.quad 0x017f27698,0x049c3cc9c
+	.long 0x17f27698, 0x49c3cc9c
-	.quad 0x058ca5f00,0x15e3e77ee
+	.long 0x58ca5f00, 0x5bd2011f
-	.quad 0x1af900c24,0x068bce87a
+	.long 0xaa7c7ad5, 0x68bce87a
-	.quad 0x0b5cfca28,0x0dd07448e
+	.long 0xb5cfca28, 0xdd07448e
-	.quad 0x0ded288f8,0x1524fa6c6
+	.long 0xded288f8, 0x57a3d037
-	.quad 0x059f229bc,0x1d8048348
+	.long 0x59f229bc, 0xdde8f5b9
-	.quad 0x06d390dec,0x16cba8aca
+	.long 0x6d390dec, 0x6956fc3b
-	.quad 0x037170390,0x0a3e3e02c
+	.long 0x37170390, 0xa3e3e02c
-	.quad 0x06353c1cc,0x042d98888
+	.long 0x6353c1cc, 0x42d98888
-	.quad 0x0c4584f5c,0x0d73c7bea
+	.long 0xc4584f5c, 0xd73c7bea
-	.quad 0x1f16a3418,0x1329d9f7e
+	.long 0xf48642e9, 0x3771e98f
-	.quad 0x0531377e2,0x185137662
+	.long 0x531377e2, 0x80ff0093
-	.quad 0x1d8d9ca7c,0x1b1c69528
+	.long 0xdd35bc8d, 0xb42ae3d9
-	.quad 0x0b25b29f2,0x18a08b5bc
+	.long 0xb25b29f2, 0x8fe4c34d
-	.quad 0x19fb2a8b0,0x02178513a
+	.long 0x9a5ede41, 0x2178513a
-	.quad 0x1a08fe6ac,0x1da758ae0
+	.long 0xa563905d, 0xdf99fc11
-	.quad 0x045cddf4e,0x0e0ac139e
+	.long 0x45cddf4e, 0xe0ac139e
-	.quad 0x1a91647f2,0x169cf9eb0
+	.long 0xacfa3103, 0x6c23e841
-	.quad 0x1a0f717c4,0x0170076fa
+	.long 0xa51b6135, 0x170076fa
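
The point of the K_table rewrite above: each entry now packs its K1:K2
constant pair into 8 bytes (two .long values) instead of 16, and pmovzxdq
zero-extends the pair back into two 64-bit PCLMULQDQ operands at load
time. A C sketch of the equivalent addressing arithmetic (illustrative;
the helper name is invented):

	#include <stdint.h>

	/* Packed K1:K2 constant pairs, 8 bytes per entry (was 16). */
	extern const uint32_t K_table[128][2];

	/* idx (>= 1) selects an entry. The assembly computes the same
	 * address as (K_table - 8) + idx*8, i.e. &K_table[idx - 1];
	 * pmovzxdq then widens the two 32-bit constants to 64 bits. */
	static inline const uint32_t *k_entry(unsigned int idx)
	{
		return K_table[idx - 1];
	}

	/* The old code subtracted idx*8 and idx*16 from tmp in two
	 * steps; "leal (%eax,%eax,2), %eax; subq %rax, tmp" folds this
	 * into tmp -= (idx*8)*3, the same idx*24 total. */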
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
new file mode 100644
index 000000000000..038f6ae87c5e
--- /dev/null
+++ b/arch/x86/crypto/des3_ede-asm_64.S
@@ -0,0 +1,805 @@
+/*
+ * des3_ede-asm_64.S - x86-64 assembly implementation of 3DES cipher
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+.file "des3_ede-asm_64.S"
+.text
+
+#define s1 .L_s1
+#define s2 ((s1) + (64*8))
+#define s3 ((s2) + (64*8))
+#define s4 ((s3) + (64*8))
+#define s5 ((s4) + (64*8))
+#define s6 ((s5) + (64*8))
+#define s7 ((s6) + (64*8))
+#define s8 ((s7) + (64*8))
+
+/* register macros */
+#define CTX %rdi
+
+#define RL0 %r8
+#define RL1 %r9
+#define RL2 %r10
+
+#define RL0d %r8d
+#define RL1d %r9d
+#define RL2d %r10d
+
+#define RR0 %r11
+#define RR1 %r12
+#define RR2 %r13
+
+#define RR0d %r11d
+#define RR1d %r12d
+#define RR2d %r13d
+
+#define RW0 %rax
+#define RW1 %rbx
+#define RW2 %rcx
+
+#define RW0d %eax
+#define RW1d %ebx
+#define RW2d %ecx
+
+#define RW0bl %al
+#define RW1bl %bl
+#define RW2bl %cl
+
+#define RW0bh %ah
+#define RW1bh %bh
+#define RW2bh %ch
+
+#define RT0 %r15
+#define RT1 %rbp
+#define RT2 %r14
+#define RT3 %rdx
+
+#define RT0d %r15d
+#define RT1d %ebp
+#define RT2d %r14d
+#define RT3d %edx
+
+/***********************************************************************
+ * 1-way 3DES
+ ***********************************************************************/
+#define do_permutation(a, b, offset, mask) \
+	movl a, RT0d; \
+	shrl $(offset), RT0d; \
+	xorl b, RT0d; \
+	andl $(mask), RT0d; \
+	xorl RT0d, b; \
+	shll $(offset), RT0d; \
+	xorl RT0d, a;
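+
+/*
+ * do_permutation is a "delta swap": it exchanges the bits of a and b
+ * selected by mask, where the chosen bits of b sit "offset" positions
+ * below the corresponding bits of a. Chaining such swaps yields the
+ * DES initial/final permutations without per-bit shuffling.
+ */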
+
+#define expand_to_64bits(val, mask) \
+	movl val##d, RT0d; \
+	rorl $4, RT0d; \
+	shlq $32, RT0; \
+	orq RT0, val; \
+	andq mask, val;
+
+#define compress_to_64bits(val) \
+	movq val, RT0; \
+	shrq $32, RT0; \
+	roll $4, RT0d; \
+	orl RT0d, val##d;
+
+#define initial_permutation(left, right) \
+	do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \
+	do_permutation(left##d, right##d, 16, 0x0000ffff); \
+	do_permutation(right##d, left##d, 2, 0x33333333); \
+	do_permutation(right##d, left##d, 8, 0x00ff00ff); \
+	movabs $0x3f3f3f3f3f3f3f3f, RT3; \
+	movl left##d, RW0d; \
+	roll $1, right##d; \
+	xorl right##d, RW0d; \
+	andl $0xaaaaaaaa, RW0d; \
+	xorl RW0d, left##d; \
+	xorl RW0d, right##d; \
+	roll $1, left##d; \
+	expand_to_64bits(right, RT3); \
+	expand_to_64bits(left, RT3);
+
+#define final_permutation(left, right) \
+	compress_to_64bits(right); \
+	compress_to_64bits(left); \
+	movl right##d, RW0d; \
+	rorl $1, left##d; \
+	xorl left##d, RW0d; \
+	andl $0xaaaaaaaa, RW0d; \
+	xorl RW0d, right##d; \
+	xorl RW0d, left##d; \
+	rorl $1, right##d; \
+	do_permutation(right##d, left##d, 8, 0x00ff00ff); \
+	do_permutation(right##d, left##d, 2, 0x33333333); \
+	do_permutation(left##d, right##d, 16, 0x0000ffff); \
+	do_permutation(left##d, right##d, 4, 0x0f0f0f0f);
+
+#define round1(n, from, to, load_next_key) \
+	xorq from, RW0; \
+	\
+	movzbl RW0bl, RT0d; \
+	movzbl RW0bh, RT1d; \
+	shrq $16, RW0; \
+	movzbl RW0bl, RT2d; \
+	movzbl RW0bh, RT3d; \
+	shrq $16, RW0; \
+	movq s8(, RT0, 8), RT0; \
+	xorq s6(, RT1, 8), to; \
+	movzbl RW0bl, RL1d; \
+	movzbl RW0bh, RT1d; \
+	shrl $16, RW0d; \
+	xorq s4(, RT2, 8), RT0; \
+	xorq s2(, RT3, 8), to; \
+	movzbl RW0bl, RT2d; \
+	movzbl RW0bh, RT3d; \
+	xorq s7(, RL1, 8), RT0; \
+	xorq s5(, RT1, 8), to; \
+	xorq s3(, RT2, 8), RT0; \
+	load_next_key(n, RW0); \
+	xorq RT0, to; \
+	xorq s1(, RT3, 8), to; \

+#define load_next_key(n, RWx) \
+	movq (((n) + 1) * 8)(CTX), RWx;
+
+#define dummy2(a, b) /*_*/
+
+#define read_block(io, left, right) \
+	movl (io), left##d; \
+	movl 4(io), right##d; \
+	bswapl left##d; \
+	bswapl right##d;
+
+#define write_block(io, left, right) \
+	bswapl left##d; \
+	bswapl right##d; \
+	movl left##d, (io); \
+	movl right##d, 4(io);
+
+ENTRY(des3_ede_x86_64_crypt_blk)
+	/* input:
+	 *	%rdi: round keys, CTX
+	 *	%rsi: dst
+	 *	%rdx: src
+	 */
+	pushq %rbp;
+	pushq %rbx;
+	pushq %r12;
+	pushq %r13;
+	pushq %r14;
+	pushq %r15;
+
+	read_block(%rdx, RL0, RR0);
+	initial_permutation(RL0, RR0);
+
+	movq (CTX), RW0;
+
+	round1(0, RR0, RL0, load_next_key);
+	round1(1, RL0, RR0, load_next_key);
+	round1(2, RR0, RL0, load_next_key);
+	round1(3, RL0, RR0, load_next_key);
+	round1(4, RR0, RL0, load_next_key);
+	round1(5, RL0, RR0, load_next_key);
+	round1(6, RR0, RL0, load_next_key);
+	round1(7, RL0, RR0, load_next_key);
+	round1(8, RR0, RL0, load_next_key);
+	round1(9, RL0, RR0, load_next_key);
+	round1(10, RR0, RL0, load_next_key);
+	round1(11, RL0, RR0, load_next_key);
+	round1(12, RR0, RL0, load_next_key);
+	round1(13, RL0, RR0, load_next_key);
+	round1(14, RR0, RL0, load_next_key);
+	round1(15, RL0, RR0, load_next_key);
+
+	round1(16+0, RL0, RR0, load_next_key);
+	round1(16+1, RR0, RL0, load_next_key);
+	round1(16+2, RL0, RR0, load_next_key);
+	round1(16+3, RR0, RL0, load_next_key);
+	round1(16+4, RL0, RR0, load_next_key);
+	round1(16+5, RR0, RL0, load_next_key);
+	round1(16+6, RL0, RR0, load_next_key);
+	round1(16+7, RR0, RL0, load_next_key);
+	round1(16+8, RL0, RR0, load_next_key);
+	round1(16+9, RR0, RL0, load_next_key);
+	round1(16+10, RL0, RR0, load_next_key);
+	round1(16+11, RR0, RL0, load_next_key);
+	round1(16+12, RL0, RR0, load_next_key);
+	round1(16+13, RR0, RL0, load_next_key);
+	round1(16+14, RL0, RR0, load_next_key);
+	round1(16+15, RR0, RL0, load_next_key);
+
+	round1(32+0, RR0, RL0, load_next_key);
+	round1(32+1, RL0, RR0, load_next_key);
+	round1(32+2, RR0, RL0, load_next_key);
+	round1(32+3, RL0, RR0, load_next_key);
+	round1(32+4, RR0, RL0, load_next_key);
+	round1(32+5, RL0, RR0, load_next_key);
+	round1(32+6, RR0, RL0, load_next_key);
+	round1(32+7, RL0, RR0, load_next_key);
+	round1(32+8, RR0, RL0, load_next_key);
+	round1(32+9, RL0, RR0, load_next_key);
+	round1(32+10, RR0, RL0, load_next_key);
+	round1(32+11, RL0, RR0, load_next_key);
+	round1(32+12, RR0, RL0, load_next_key);
+	round1(32+13, RL0, RR0, load_next_key);
+	round1(32+14, RR0, RL0, load_next_key);
+	round1(32+15, RL0, RR0, dummy2);
+
+	final_permutation(RR0, RL0);
+	write_block(%rsi, RR0, RL0);
+
+	popq %r15;
+	popq %r14;
+	popq %r13;
+	popq %r12;
+	popq %rbx;
+	popq %rbp;
+
+	ret;
+ENDPROC(des3_ede_x86_64_crypt_blk)
+
+/***********************************************************************
+ * 3-way 3DES
+ ***********************************************************************/
+#define expand_to_64bits(val, mask) \
+	movl val##d, RT0d; \
+	rorl $4, RT0d; \
+	shlq $32, RT0; \
+	orq RT0, val; \
+	andq mask, val;
+
+#define compress_to_64bits(val) \
+	movq val, RT0; \
+	shrq $32, RT0; \
+	roll $4, RT0d; \
+	orl RT0d, val##d;
+
+#define initial_permutation3(left, right) \
+	do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \
+	do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
+	do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \
+	do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
+	do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \
+	do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
+	\
+	do_permutation(right##0d, left##0d, 2, 0x33333333); \
+	do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \
+	do_permutation(right##1d, left##1d, 2, 0x33333333); \
+	do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \
+	do_permutation(right##2d, left##2d, 2, 0x33333333); \
+	do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \
+	\
+	movabs $0x3f3f3f3f3f3f3f3f, RT3; \
+	\
+	movl left##0d, RW0d; \
+	roll $1, right##0d; \
+	xorl right##0d, RW0d; \
+	andl $0xaaaaaaaa, RW0d; \
+	xorl RW0d, left##0d; \
+	xorl RW0d, right##0d; \
+	roll $1, left##0d; \
+	expand_to_64bits(right##0, RT3); \
+	expand_to_64bits(left##0, RT3); \
+	movl left##1d, RW1d; \
+	roll $1, right##1d; \
+	xorl right##1d, RW1d; \
+	andl $0xaaaaaaaa, RW1d; \
+	xorl RW1d, left##1d; \
+	xorl RW1d, right##1d; \
+	roll $1, left##1d; \
+	expand_to_64bits(right##1, RT3); \
+	expand_to_64bits(left##1, RT3); \
+	movl left##2d, RW2d; \
+	roll $1, right##2d; \
+	xorl right##2d, RW2d; \
+	andl $0xaaaaaaaa, RW2d; \
+	xorl RW2d, left##2d; \
+	xorl RW2d, right##2d; \
+	roll $1, left##2d; \
+	expand_to_64bits(right##2, RT3); \
+	expand_to_64bits(left##2, RT3);
+
+#define final_permutation3(left, right) \
+	compress_to_64bits(right##0); \
+	compress_to_64bits(left##0); \
+	movl right##0d, RW0d; \
+	rorl $1, left##0d; \
+	xorl left##0d, RW0d; \
+	andl $0xaaaaaaaa, RW0d; \
+	xorl RW0d, right##0d; \
+	xorl RW0d, left##0d; \
+	rorl $1, right##0d; \
+	compress_to_64bits(right##1); \
+	compress_to_64bits(left##1); \
+	movl right##1d, RW1d; \
+	rorl $1, left##1d; \
+	xorl left##1d, RW1d; \
+	andl $0xaaaaaaaa, RW1d; \
+	xorl RW1d, right##1d; \
+	xorl RW1d, left##1d; \
+	rorl $1, right##1d; \
+	compress_to_64bits(right##2); \
+	compress_to_64bits(left##2); \
+	movl right##2d, RW2d; \
+	rorl $1, left##2d; \
+	xorl left##2d, RW2d; \
+	andl $0xaaaaaaaa, RW2d; \
+	xorl RW2d, right##2d; \
+	xorl RW2d, left##2d; \
+	rorl $1, right##2d; \
+	\
+	do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \
+	do_permutation(right##0d, left##0d, 2, 0x33333333); \
+	do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \
+	do_permutation(right##1d, left##1d, 2, 0x33333333); \
+	do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \
+	do_permutation(right##2d, left##2d, 2, 0x33333333); \
+	\
+	do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
+	do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \
+	do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
+	do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \
+	do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
+	do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f);
+
+#define round3(n, from, to, load_next_key, do_movq) \
+	xorq from##0, RW0; \
+	movzbl RW0bl, RT3d; \
+	movzbl RW0bh, RT1d; \
+	shrq $16, RW0; \
+	xorq s8(, RT3, 8), to##0; \
+	xorq s6(, RT1, 8), to##0; \
+	movzbl RW0bl, RT3d; \
+	movzbl RW0bh, RT1d; \
+	shrq $16, RW0; \
+	xorq s4(, RT3, 8), to##0; \
+	xorq s2(, RT1, 8), to##0; \
+	movzbl RW0bl, RT3d; \
+	movzbl RW0bh, RT1d; \
+	shrl $16, RW0d; \
+	xorq s7(, RT3, 8), to##0; \
+	xorq s5(, RT1, 8), to##0; \
+	movzbl RW0bl, RT3d; \
+	movzbl RW0bh, RT1d; \
+	load_next_key(n, RW0); \
+	xorq s3(, RT3, 8), to##0; \
+	xorq s1(, RT1, 8), to##0; \
+	xorq from##1, RW1; \
+	movzbl RW1bl, RT3d; \
+	movzbl RW1bh, RT1d; \
+	shrq $16, RW1; \
+	xorq s8(, RT3, 8), to##1; \
+	xorq s6(, RT1, 8), to##1; \
+	movzbl RW1bl, RT3d; \
+	movzbl RW1bh, RT1d; \
+	shrq $16, RW1; \
+	xorq s4(, RT3, 8), to##1; \
+	xorq s2(, RT1, 8), to##1; \
+	movzbl RW1bl, RT3d; \
+	movzbl RW1bh, RT1d; \
+	shrl $16, RW1d; \
+	xorq s7(, RT3, 8), to##1; \
+	xorq s5(, RT1, 8), to##1; \
+	movzbl RW1bl, RT3d; \
+	movzbl RW1bh, RT1d; \
+	do_movq(RW0, RW1); \
+	xorq s3(, RT3, 8), to##1; \
+	xorq s1(, RT1, 8), to##1; \
+	xorq from##2, RW2; \
+	movzbl RW2bl, RT3d; \
+	movzbl RW2bh, RT1d; \
+	shrq $16, RW2; \
+	xorq s8(, RT3, 8), to##2; \
+	xorq s6(, RT1, 8), to##2; \
+	movzbl RW2bl, RT3d; \
+	movzbl RW2bh, RT1d; \
+	shrq $16, RW2; \
+	xorq s4(, RT3, 8), to##2; \
+	xorq s2(, RT1, 8), to##2; \
+	movzbl RW2bl, RT3d; \
+	movzbl RW2bh, RT1d; \
+	shrl $16, RW2d; \
+	xorq s7(, RT3, 8), to##2; \
+	xorq s5(, RT1, 8), to##2; \
+	movzbl RW2bl, RT3d; \
+	movzbl RW2bh, RT1d; \
+	do_movq(RW0, RW2); \
+	xorq s3(, RT3, 8), to##2; \
+	xorq s1(, RT1, 8), to##2;
+
+#define __movq(src, dst) \
+	movq src, dst;
+
+ENTRY(des3_ede_x86_64_crypt_blk_3way)
+	/* input:
+	 *	%rdi: ctx, round keys
+	 *	%rsi: dst (3 blocks)
+	 *	%rdx: src (3 blocks)
+	 */
+
+	pushq %rbp;
+	pushq %rbx;
+	pushq %r12;
+	pushq %r13;
+	pushq %r14;
+	pushq %r15;
+
+	/* load input */
+	movl 0 * 4(%rdx), RL0d;
+	movl 1 * 4(%rdx), RR0d;
+	movl 2 * 4(%rdx), RL1d;
+	movl 3 * 4(%rdx), RR1d;
+	movl 4 * 4(%rdx), RL2d;
+	movl 5 * 4(%rdx), RR2d;
+
+	bswapl RL0d;
+	bswapl RR0d;
+	bswapl RL1d;
+	bswapl RR1d;
+	bswapl RL2d;
+	bswapl RR2d;
+
+	initial_permutation3(RL, RR);
+
+	movq 0(CTX), RW0;
+	movq RW0, RW1;
+	movq RW0, RW2;
+
+	round3(0, RR, RL, load_next_key, __movq);
+	round3(1, RL, RR, load_next_key, __movq);
+	round3(2, RR, RL, load_next_key, __movq);
+	round3(3, RL, RR, load_next_key, __movq);
+	round3(4, RR, RL, load_next_key, __movq);
+	round3(5, RL, RR, load_next_key, __movq);
+	round3(6, RR, RL, load_next_key, __movq);
+	round3(7, RL, RR, load_next_key, __movq);
+	round3(8, RR, RL, load_next_key, __movq);
+	round3(9, RL, RR, load_next_key, __movq);
+	round3(10, RR, RL, load_next_key, __movq);
+	round3(11, RL, RR, load_next_key, __movq);
+	round3(12, RR, RL, load_next_key, __movq);
+	round3(13, RL, RR, load_next_key, __movq);
+	round3(14, RR, RL, load_next_key, __movq);
+	round3(15, RL, RR, load_next_key, __movq);
+
+	round3(16+0, RL, RR, load_next_key, __movq);
+	round3(16+1, RR, RL, load_next_key, __movq);
+	round3(16+2, RL, RR, load_next_key, __movq);
+	round3(16+3, RR, RL, load_next_key, __movq);
+	round3(16+4, RL, RR, load_next_key, __movq);
+	round3(16+5, RR, RL, load_next_key, __movq);
+	round3(16+6, RL, RR, load_next_key, __movq);
+	round3(16+7, RR, RL, load_next_key, __movq);
+	round3(16+8, RL, RR, load_next_key, __movq);
+	round3(16+9, RR, RL, load_next_key, __movq);
+	round3(16+10, RL, RR, load_next_key, __movq);
+	round3(16+11, RR, RL, load_next_key, __movq);
+	round3(16+12, RL, RR, load_next_key, __movq);
+	round3(16+13, RR, RL, load_next_key, __movq);
+	round3(16+14, RL, RR, load_next_key, __movq);
+	round3(16+15, RR, RL, load_next_key, __movq);
+
+	round3(32+0, RR, RL, load_next_key, __movq);
+	round3(32+1, RL, RR, load_next_key, __movq);
+	round3(32+2, RR, RL, load_next_key, __movq);
+	round3(32+3, RL, RR, load_next_key, __movq);
+	round3(32+4, RR, RL, load_next_key, __movq);
+	round3(32+5, RL, RR, load_next_key, __movq);
+	round3(32+6, RR, RL, load_next_key, __movq);
+	round3(32+7, RL, RR, load_next_key, __movq);
+	round3(32+8, RR, RL, load_next_key, __movq);
+	round3(32+9, RL, RR, load_next_key, __movq);
+	round3(32+10, RR, RL, load_next_key, __movq);
+	round3(32+11, RL, RR, load_next_key, __movq);
+	round3(32+12, RR, RL, load_next_key, __movq);
+	round3(32+13, RL, RR, load_next_key, __movq);
+	round3(32+14, RR, RL, load_next_key, __movq);
512 | round3(32+15, RL, RR, dummy2, dummy2); | ||
513 | |||
514 | final_permutation3(RR, RL); | ||
515 | |||
516 | bswapl RR0d; | ||
517 | bswapl RL0d; | ||
518 | bswapl RR1d; | ||
519 | bswapl RL1d; | ||
520 | bswapl RR2d; | ||
521 | bswapl RL2d; | ||
522 | |||
523 | movl RR0d, 0 * 4(%rsi); | ||
524 | movl RL0d, 1 * 4(%rsi); | ||
525 | movl RR1d, 2 * 4(%rsi); | ||
526 | movl RL1d, 3 * 4(%rsi); | ||
527 | movl RR2d, 4 * 4(%rsi); | ||
528 | movl RL2d, 5 * 4(%rsi); | ||
529 | |||
530 | popq %r15; | ||
531 | popq %r14; | ||
532 | popq %r13; | ||
533 | popq %r12; | ||
534 | popq %rbx; | ||
535 | popq %rbp; | ||
536 | |||
537 | ret; | ||
538 | ENDPROC(des3_ede_x86_64_crypt_blk_3way) | ||
539 | |||
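The .L_s1 through .L_s8 tables that follow drive the round3 macro above: each table holds 64 quadword entries, which appear to be the 4-bit DES S-box outputs pre-routed through the P permutation and spread into a 64-bit word, so an entire Feistel round collapses into eight table lookups XORed together. A conceptual sketch of that structure in C, assuming plain 6-bit indexing for clarity (an assumption -- the real code extracts whole bytes whose layout the key schedule prepares, and walks the boxes in the order s8, s6, s4, s2, s7, s5, s3, s1):

    /* Conceptual model only; not the exact bit layout used above. */
    static u64 feistel_round(u64 rw, u64 subkey, const u64 s[8][64])
    {
            u64 out = 0;
            int i;

            rw ^= subkey;                   /* key mixing: "xorq from, RW" */
            for (i = 0; i < 8; i++)         /* eight 6-bit S-box inputs */
                    out ^= s[7 - i][(rw >> (6 * i)) & 0x3f];
            return out;                     /* XORed into the other half */
    }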
540 | .data | ||
541 | .align 16 | ||
542 | .L_s1: | ||
543 | .quad 0x0010100001010400, 0x0000000000000000 | ||
544 | .quad 0x0000100000010000, 0x0010100001010404 | ||
545 | .quad 0x0010100001010004, 0x0000100000010404 | ||
546 | .quad 0x0000000000000004, 0x0000100000010000 | ||
547 | .quad 0x0000000000000400, 0x0010100001010400 | ||
548 | .quad 0x0010100001010404, 0x0000000000000400 | ||
549 | .quad 0x0010000001000404, 0x0010100001010004 | ||
550 | .quad 0x0010000001000000, 0x0000000000000004 | ||
551 | .quad 0x0000000000000404, 0x0010000001000400 | ||
552 | .quad 0x0010000001000400, 0x0000100000010400 | ||
553 | .quad 0x0000100000010400, 0x0010100001010000 | ||
554 | .quad 0x0010100001010000, 0x0010000001000404 | ||
555 | .quad 0x0000100000010004, 0x0010000001000004 | ||
556 | .quad 0x0010000001000004, 0x0000100000010004 | ||
557 | .quad 0x0000000000000000, 0x0000000000000404 | ||
558 | .quad 0x0000100000010404, 0x0010000001000000 | ||
559 | .quad 0x0000100000010000, 0x0010100001010404 | ||
560 | .quad 0x0000000000000004, 0x0010100001010000 | ||
561 | .quad 0x0010100001010400, 0x0010000001000000 | ||
562 | .quad 0x0010000001000000, 0x0000000000000400 | ||
563 | .quad 0x0010100001010004, 0x0000100000010000 | ||
564 | .quad 0x0000100000010400, 0x0010000001000004 | ||
565 | .quad 0x0000000000000400, 0x0000000000000004 | ||
566 | .quad 0x0010000001000404, 0x0000100000010404 | ||
567 | .quad 0x0010100001010404, 0x0000100000010004 | ||
568 | .quad 0x0010100001010000, 0x0010000001000404 | ||
569 | .quad 0x0010000001000004, 0x0000000000000404 | ||
570 | .quad 0x0000100000010404, 0x0010100001010400 | ||
571 | .quad 0x0000000000000404, 0x0010000001000400 | ||
572 | .quad 0x0010000001000400, 0x0000000000000000 | ||
573 | .quad 0x0000100000010004, 0x0000100000010400 | ||
574 | .quad 0x0000000000000000, 0x0010100001010004 | ||
575 | .L_s2: | ||
576 | .quad 0x0801080200100020, 0x0800080000000000 | ||
577 | .quad 0x0000080000000000, 0x0001080200100020 | ||
578 | .quad 0x0001000000100000, 0x0000000200000020 | ||
579 | .quad 0x0801000200100020, 0x0800080200000020 | ||
580 | .quad 0x0800000200000020, 0x0801080200100020 | ||
581 | .quad 0x0801080000100000, 0x0800000000000000 | ||
582 | .quad 0x0800080000000000, 0x0001000000100000 | ||
583 | .quad 0x0000000200000020, 0x0801000200100020 | ||
584 | .quad 0x0001080000100000, 0x0001000200100020 | ||
585 | .quad 0x0800080200000020, 0x0000000000000000 | ||
586 | .quad 0x0800000000000000, 0x0000080000000000 | ||
587 | .quad 0x0001080200100020, 0x0801000000100000 | ||
588 | .quad 0x0001000200100020, 0x0800000200000020 | ||
589 | .quad 0x0000000000000000, 0x0001080000100000 | ||
590 | .quad 0x0000080200000020, 0x0801080000100000 | ||
591 | .quad 0x0801000000100000, 0x0000080200000020 | ||
592 | .quad 0x0000000000000000, 0x0001080200100020 | ||
593 | .quad 0x0801000200100020, 0x0001000000100000 | ||
594 | .quad 0x0800080200000020, 0x0801000000100000 | ||
595 | .quad 0x0801080000100000, 0x0000080000000000 | ||
596 | .quad 0x0801000000100000, 0x0800080000000000 | ||
597 | .quad 0x0000000200000020, 0x0801080200100020 | ||
598 | .quad 0x0001080200100020, 0x0000000200000020 | ||
599 | .quad 0x0000080000000000, 0x0800000000000000 | ||
600 | .quad 0x0000080200000020, 0x0801080000100000 | ||
601 | .quad 0x0001000000100000, 0x0800000200000020 | ||
602 | .quad 0x0001000200100020, 0x0800080200000020 | ||
603 | .quad 0x0800000200000020, 0x0001000200100020 | ||
604 | .quad 0x0001080000100000, 0x0000000000000000 | ||
605 | .quad 0x0800080000000000, 0x0000080200000020 | ||
606 | .quad 0x0800000000000000, 0x0801000200100020 | ||
607 | .quad 0x0801080200100020, 0x0001080000100000 | ||
608 | .L_s3: | ||
609 | .quad 0x0000002000000208, 0x0000202008020200 | ||
610 | .quad 0x0000000000000000, 0x0000200008020008 | ||
611 | .quad 0x0000002008000200, 0x0000000000000000 | ||
612 | .quad 0x0000202000020208, 0x0000002008000200 | ||
613 | .quad 0x0000200000020008, 0x0000000008000008 | ||
614 | .quad 0x0000000008000008, 0x0000200000020000 | ||
615 | .quad 0x0000202008020208, 0x0000200000020008 | ||
616 | .quad 0x0000200008020000, 0x0000002000000208 | ||
617 | .quad 0x0000000008000000, 0x0000000000000008 | ||
618 | .quad 0x0000202008020200, 0x0000002000000200 | ||
619 | .quad 0x0000202000020200, 0x0000200008020000 | ||
620 | .quad 0x0000200008020008, 0x0000202000020208 | ||
621 | .quad 0x0000002008000208, 0x0000202000020200 | ||
622 | .quad 0x0000200000020000, 0x0000002008000208 | ||
623 | .quad 0x0000000000000008, 0x0000202008020208 | ||
624 | .quad 0x0000002000000200, 0x0000000008000000 | ||
625 | .quad 0x0000202008020200, 0x0000000008000000 | ||
626 | .quad 0x0000200000020008, 0x0000002000000208 | ||
627 | .quad 0x0000200000020000, 0x0000202008020200 | ||
628 | .quad 0x0000002008000200, 0x0000000000000000 | ||
629 | .quad 0x0000002000000200, 0x0000200000020008 | ||
630 | .quad 0x0000202008020208, 0x0000002008000200 | ||
631 | .quad 0x0000000008000008, 0x0000002000000200 | ||
632 | .quad 0x0000000000000000, 0x0000200008020008 | ||
633 | .quad 0x0000002008000208, 0x0000200000020000 | ||
634 | .quad 0x0000000008000000, 0x0000202008020208 | ||
635 | .quad 0x0000000000000008, 0x0000202000020208 | ||
636 | .quad 0x0000202000020200, 0x0000000008000008 | ||
637 | .quad 0x0000200008020000, 0x0000002008000208 | ||
638 | .quad 0x0000002000000208, 0x0000200008020000 | ||
639 | .quad 0x0000202000020208, 0x0000000000000008 | ||
640 | .quad 0x0000200008020008, 0x0000202000020200 | ||
641 | .L_s4: | ||
642 | .quad 0x1008020000002001, 0x1000020800002001 | ||
643 | .quad 0x1000020800002001, 0x0000000800000000 | ||
644 | .quad 0x0008020800002000, 0x1008000800000001 | ||
645 | .quad 0x1008000000000001, 0x1000020000002001 | ||
646 | .quad 0x0000000000000000, 0x0008020000002000 | ||
647 | .quad 0x0008020000002000, 0x1008020800002001 | ||
648 | .quad 0x1000000800000001, 0x0000000000000000 | ||
649 | .quad 0x0008000800000000, 0x1008000000000001 | ||
650 | .quad 0x1000000000000001, 0x0000020000002000 | ||
651 | .quad 0x0008000000000000, 0x1008020000002001 | ||
652 | .quad 0x0000000800000000, 0x0008000000000000 | ||
653 | .quad 0x1000020000002001, 0x0000020800002000 | ||
654 | .quad 0x1008000800000001, 0x1000000000000001 | ||
655 | .quad 0x0000020800002000, 0x0008000800000000 | ||
656 | .quad 0x0000020000002000, 0x0008020800002000 | ||
657 | .quad 0x1008020800002001, 0x1000000800000001 | ||
658 | .quad 0x0008000800000000, 0x1008000000000001 | ||
659 | .quad 0x0008020000002000, 0x1008020800002001 | ||
660 | .quad 0x1000000800000001, 0x0000000000000000 | ||
661 | .quad 0x0000000000000000, 0x0008020000002000 | ||
662 | .quad 0x0000020800002000, 0x0008000800000000 | ||
663 | .quad 0x1008000800000001, 0x1000000000000001 | ||
664 | .quad 0x1008020000002001, 0x1000020800002001 | ||
665 | .quad 0x1000020800002001, 0x0000000800000000 | ||
666 | .quad 0x1008020800002001, 0x1000000800000001 | ||
667 | .quad 0x1000000000000001, 0x0000020000002000 | ||
668 | .quad 0x1008000000000001, 0x1000020000002001 | ||
669 | .quad 0x0008020800002000, 0x1008000800000001 | ||
670 | .quad 0x1000020000002001, 0x0000020800002000 | ||
671 | .quad 0x0008000000000000, 0x1008020000002001 | ||
672 | .quad 0x0000000800000000, 0x0008000000000000 | ||
673 | .quad 0x0000020000002000, 0x0008020800002000 | ||
674 | .L_s5: | ||
675 | .quad 0x0000001000000100, 0x0020001002080100 | ||
676 | .quad 0x0020000002080000, 0x0420001002000100 | ||
677 | .quad 0x0000000000080000, 0x0000001000000100 | ||
678 | .quad 0x0400000000000000, 0x0020000002080000 | ||
679 | .quad 0x0400001000080100, 0x0000000000080000 | ||
680 | .quad 0x0020001002000100, 0x0400001000080100 | ||
681 | .quad 0x0420001002000100, 0x0420000002080000 | ||
682 | .quad 0x0000001000080100, 0x0400000000000000 | ||
683 | .quad 0x0020000002000000, 0x0400000000080000 | ||
684 | .quad 0x0400000000080000, 0x0000000000000000 | ||
685 | .quad 0x0400001000000100, 0x0420001002080100 | ||
686 | .quad 0x0420001002080100, 0x0020001002000100 | ||
687 | .quad 0x0420000002080000, 0x0400001000000100 | ||
688 | .quad 0x0000000000000000, 0x0420000002000000 | ||
689 | .quad 0x0020001002080100, 0x0020000002000000 | ||
690 | .quad 0x0420000002000000, 0x0000001000080100 | ||
691 | .quad 0x0000000000080000, 0x0420001002000100 | ||
692 | .quad 0x0000001000000100, 0x0020000002000000 | ||
693 | .quad 0x0400000000000000, 0x0020000002080000 | ||
694 | .quad 0x0420001002000100, 0x0400001000080100 | ||
695 | .quad 0x0020001002000100, 0x0400000000000000 | ||
696 | .quad 0x0420000002080000, 0x0020001002080100 | ||
697 | .quad 0x0400001000080100, 0x0000001000000100 | ||
698 | .quad 0x0020000002000000, 0x0420000002080000 | ||
699 | .quad 0x0420001002080100, 0x0000001000080100 | ||
700 | .quad 0x0420000002000000, 0x0420001002080100 | ||
701 | .quad 0x0020000002080000, 0x0000000000000000 | ||
702 | .quad 0x0400000000080000, 0x0420000002000000 | ||
703 | .quad 0x0000001000080100, 0x0020001002000100 | ||
704 | .quad 0x0400001000000100, 0x0000000000080000 | ||
705 | .quad 0x0000000000000000, 0x0400000000080000 | ||
706 | .quad 0x0020001002080100, 0x0400001000000100 | ||
707 | .L_s6: | ||
708 | .quad 0x0200000120000010, 0x0204000020000000 | ||
709 | .quad 0x0000040000000000, 0x0204040120000010 | ||
710 | .quad 0x0204000020000000, 0x0000000100000010 | ||
711 | .quad 0x0204040120000010, 0x0004000000000000 | ||
712 | .quad 0x0200040020000000, 0x0004040100000010 | ||
713 | .quad 0x0004000000000000, 0x0200000120000010 | ||
714 | .quad 0x0004000100000010, 0x0200040020000000 | ||
715 | .quad 0x0200000020000000, 0x0000040100000010 | ||
716 | .quad 0x0000000000000000, 0x0004000100000010 | ||
717 | .quad 0x0200040120000010, 0x0000040000000000 | ||
718 | .quad 0x0004040000000000, 0x0200040120000010 | ||
719 | .quad 0x0000000100000010, 0x0204000120000010 | ||
720 | .quad 0x0204000120000010, 0x0000000000000000 | ||
721 | .quad 0x0004040100000010, 0x0204040020000000 | ||
722 | .quad 0x0000040100000010, 0x0004040000000000 | ||
723 | .quad 0x0204040020000000, 0x0200000020000000 | ||
724 | .quad 0x0200040020000000, 0x0000000100000010 | ||
725 | .quad 0x0204000120000010, 0x0004040000000000 | ||
726 | .quad 0x0204040120000010, 0x0004000000000000 | ||
727 | .quad 0x0000040100000010, 0x0200000120000010 | ||
728 | .quad 0x0004000000000000, 0x0200040020000000 | ||
729 | .quad 0x0200000020000000, 0x0000040100000010 | ||
730 | .quad 0x0200000120000010, 0x0204040120000010 | ||
731 | .quad 0x0004040000000000, 0x0204000020000000 | ||
732 | .quad 0x0004040100000010, 0x0204040020000000 | ||
733 | .quad 0x0000000000000000, 0x0204000120000010 | ||
734 | .quad 0x0000000100000010, 0x0000040000000000 | ||
735 | .quad 0x0204000020000000, 0x0004040100000010 | ||
736 | .quad 0x0000040000000000, 0x0004000100000010 | ||
737 | .quad 0x0200040120000010, 0x0000000000000000 | ||
738 | .quad 0x0204040020000000, 0x0200000020000000 | ||
739 | .quad 0x0004000100000010, 0x0200040120000010 | ||
740 | .L_s7: | ||
741 | .quad 0x0002000000200000, 0x2002000004200002 | ||
742 | .quad 0x2000000004000802, 0x0000000000000000 | ||
743 | .quad 0x0000000000000800, 0x2000000004000802 | ||
744 | .quad 0x2002000000200802, 0x0002000004200800 | ||
745 | .quad 0x2002000004200802, 0x0002000000200000 | ||
746 | .quad 0x0000000000000000, 0x2000000004000002 | ||
747 | .quad 0x2000000000000002, 0x0000000004000000 | ||
748 | .quad 0x2002000004200002, 0x2000000000000802 | ||
749 | .quad 0x0000000004000800, 0x2002000000200802 | ||
750 | .quad 0x2002000000200002, 0x0000000004000800 | ||
751 | .quad 0x2000000004000002, 0x0002000004200000 | ||
752 | .quad 0x0002000004200800, 0x2002000000200002 | ||
753 | .quad 0x0002000004200000, 0x0000000000000800 | ||
754 | .quad 0x2000000000000802, 0x2002000004200802 | ||
755 | .quad 0x0002000000200800, 0x2000000000000002 | ||
756 | .quad 0x0000000004000000, 0x0002000000200800 | ||
757 | .quad 0x0000000004000000, 0x0002000000200800 | ||
758 | .quad 0x0002000000200000, 0x2000000004000802 | ||
759 | .quad 0x2000000004000802, 0x2002000004200002 | ||
760 | .quad 0x2002000004200002, 0x2000000000000002 | ||
761 | .quad 0x2002000000200002, 0x0000000004000000 | ||
762 | .quad 0x0000000004000800, 0x0002000000200000 | ||
763 | .quad 0x0002000004200800, 0x2000000000000802 | ||
764 | .quad 0x2002000000200802, 0x0002000004200800 | ||
765 | .quad 0x2000000000000802, 0x2000000004000002 | ||
766 | .quad 0x2002000004200802, 0x0002000004200000 | ||
767 | .quad 0x0002000000200800, 0x0000000000000000 | ||
768 | .quad 0x2000000000000002, 0x2002000004200802 | ||
769 | .quad 0x0000000000000000, 0x2002000000200802 | ||
770 | .quad 0x0002000004200000, 0x0000000000000800 | ||
771 | .quad 0x2000000004000002, 0x0000000004000800 | ||
772 | .quad 0x0000000000000800, 0x2002000000200002 | ||
773 | .L_s8: | ||
774 | .quad 0x0100010410001000, 0x0000010000001000 | ||
775 | .quad 0x0000000000040000, 0x0100010410041000 | ||
776 | .quad 0x0100000010000000, 0x0100010410001000 | ||
777 | .quad 0x0000000400000000, 0x0100000010000000 | ||
778 | .quad 0x0000000400040000, 0x0100000010040000 | ||
779 | .quad 0x0100010410041000, 0x0000010000041000 | ||
780 | .quad 0x0100010010041000, 0x0000010400041000 | ||
781 | .quad 0x0000010000001000, 0x0000000400000000 | ||
782 | .quad 0x0100000010040000, 0x0100000410000000 | ||
783 | .quad 0x0100010010001000, 0x0000010400001000 | ||
784 | .quad 0x0000010000041000, 0x0000000400040000 | ||
785 | .quad 0x0100000410040000, 0x0100010010041000 | ||
786 | .quad 0x0000010400001000, 0x0000000000000000 | ||
787 | .quad 0x0000000000000000, 0x0100000410040000 | ||
788 | .quad 0x0100000410000000, 0x0100010010001000 | ||
789 | .quad 0x0000010400041000, 0x0000000000040000 | ||
790 | .quad 0x0000010400041000, 0x0000000000040000 | ||
791 | .quad 0x0100010010041000, 0x0000010000001000 | ||
792 | .quad 0x0000000400000000, 0x0100000410040000 | ||
793 | .quad 0x0000010000001000, 0x0000010400041000 | ||
794 | .quad 0x0100010010001000, 0x0000000400000000 | ||
795 | .quad 0x0100000410000000, 0x0100000010040000 | ||
796 | .quad 0x0100000410040000, 0x0100000010000000 | ||
797 | .quad 0x0000000000040000, 0x0100010410001000 | ||
798 | .quad 0x0000000000000000, 0x0100010410041000 | ||
799 | .quad 0x0000000400040000, 0x0100000410000000 | ||
800 | .quad 0x0100000010040000, 0x0100010010001000 | ||
801 | .quad 0x0100010410001000, 0x0000000000000000 | ||
802 | .quad 0x0100010410041000, 0x0000010000041000 | ||
803 | .quad 0x0000010000041000, 0x0000010400001000 | ||
804 | .quad 0x0000010400001000, 0x0000000400040000 | ||
805 | .quad 0x0100000010000000, 0x0100010010041000 | ||
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c new file mode 100644 index 000000000000..0e9c0668fe4e --- /dev/null +++ b/arch/x86/crypto/des3_ede_glue.c | |||
@@ -0,0 +1,509 @@ | |||
1 | /* | ||
2 | * Glue Code for assembler optimized version of 3DES | ||
3 | * | ||
4 | * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | ||
5 | * | ||
6 | * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: | ||
7 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | ||
8 | * CTR part based on code (crypto/ctr.c) by: | ||
9 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <asm/processor.h> | ||
24 | #include <crypto/des.h> | ||
25 | #include <linux/crypto.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <crypto/algapi.h> | ||
30 | |||
31 | struct des3_ede_x86_ctx { | ||
32 | u32 enc_expkey[DES3_EDE_EXPKEY_WORDS]; | ||
33 | u32 dec_expkey[DES3_EDE_EXPKEY_WORDS]; | ||
34 | }; | ||
35 | |||
36 | /* regular block cipher functions */ | ||
37 | asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst, | ||
38 | const u8 *src); | ||
39 | |||
40 | /* 3-way parallel cipher functions */ | ||
41 | asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst, | ||
42 | const u8 *src); | ||
43 | |||
44 | static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, | ||
45 | const u8 *src) | ||
46 | { | ||
47 | u32 *enc_ctx = ctx->enc_expkey; | ||
48 | |||
49 | des3_ede_x86_64_crypt_blk(enc_ctx, dst, src); | ||
50 | } | ||
51 | |||
52 | static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, | ||
53 | const u8 *src) | ||
54 | { | ||
55 | u32 *dec_ctx = ctx->dec_expkey; | ||
56 | |||
57 | des3_ede_x86_64_crypt_blk(dec_ctx, dst, src); | ||
58 | } | ||
59 | |||
60 | static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, | ||
61 | const u8 *src) | ||
62 | { | ||
63 | u32 *enc_ctx = ctx->enc_expkey; | ||
64 | |||
65 | des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src); | ||
66 | } | ||
67 | |||
68 | static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, | ||
69 | const u8 *src) | ||
70 | { | ||
71 | u32 *dec_ctx = ctx->dec_expkey; | ||
72 | |||
73 | des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src); | ||
74 | } | ||
75 | |||
76 | static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | ||
77 | { | ||
78 | des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src); | ||
79 | } | ||
80 | |||
81 | static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | ||
82 | { | ||
83 | des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src); | ||
84 | } | ||
85 | |||
86 | static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, | ||
87 | const u32 *expkey) | ||
88 | { | ||
89 | unsigned int bsize = DES3_EDE_BLOCK_SIZE; | ||
90 | unsigned int nbytes; | ||
91 | int err; | ||
92 | |||
93 | err = blkcipher_walk_virt(desc, walk); | ||
94 | |||
95 | while ((nbytes = walk->nbytes)) { | ||
96 | u8 *wsrc = walk->src.virt.addr; | ||
97 | u8 *wdst = walk->dst.virt.addr; | ||
98 | |||
99 | /* Process three block batch */ | ||
100 | if (nbytes >= bsize * 3) { | ||
101 | do { | ||
102 | des3_ede_x86_64_crypt_blk_3way(expkey, wdst, | ||
103 | wsrc); | ||
104 | |||
105 | wsrc += bsize * 3; | ||
106 | wdst += bsize * 3; | ||
107 | nbytes -= bsize * 3; | ||
108 | } while (nbytes >= bsize * 3); | ||
109 | |||
110 | if (nbytes < bsize) | ||
111 | goto done; | ||
112 | } | ||
113 | |||
114 | /* Handle leftovers */ | ||
115 | do { | ||
116 | des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc); | ||
117 | |||
118 | wsrc += bsize; | ||
119 | wdst += bsize; | ||
120 | nbytes -= bsize; | ||
121 | } while (nbytes >= bsize); | ||
122 | |||
123 | done: | ||
124 | err = blkcipher_walk_done(desc, walk, nbytes); | ||
125 | } | ||
126 | |||
127 | return err; | ||
128 | } | ||
129 | |||
130 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
131 | struct scatterlist *src, unsigned int nbytes) | ||
132 | { | ||
133 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
134 | struct blkcipher_walk walk; | ||
135 | |||
136 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
137 | return ecb_crypt(desc, &walk, ctx->enc_expkey); | ||
138 | } | ||
139 | |||
140 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
141 | struct scatterlist *src, unsigned int nbytes) | ||
142 | { | ||
143 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
144 | struct blkcipher_walk walk; | ||
145 | |||
146 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
147 | return ecb_crypt(desc, &walk, ctx->dec_expkey); | ||
148 | } | ||
149 | |||
150 | static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, | ||
151 | struct blkcipher_walk *walk) | ||
152 | { | ||
153 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
154 | unsigned int bsize = DES3_EDE_BLOCK_SIZE; | ||
155 | unsigned int nbytes = walk->nbytes; | ||
156 | u64 *src = (u64 *)walk->src.virt.addr; | ||
157 | u64 *dst = (u64 *)walk->dst.virt.addr; | ||
158 | u64 *iv = (u64 *)walk->iv; | ||
159 | |||
160 | do { | ||
161 | *dst = *src ^ *iv; | ||
162 | des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst); | ||
163 | iv = dst; | ||
164 | |||
165 | src += 1; | ||
166 | dst += 1; | ||
167 | nbytes -= bsize; | ||
168 | } while (nbytes >= bsize); | ||
169 | |||
170 | *(u64 *)walk->iv = *iv; | ||
171 | return nbytes; | ||
172 | } | ||
173 | |||
174 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
175 | struct scatterlist *src, unsigned int nbytes) | ||
176 | { | ||
177 | struct blkcipher_walk walk; | ||
178 | int err; | ||
179 | |||
180 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
181 | err = blkcipher_walk_virt(desc, &walk); | ||
182 | |||
183 | while ((nbytes = walk.nbytes)) { | ||
184 | nbytes = __cbc_encrypt(desc, &walk); | ||
185 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
186 | } | ||
187 | |||
188 | return err; | ||
189 | } | ||
190 | |||
191 | static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, | ||
192 | struct blkcipher_walk *walk) | ||
193 | { | ||
194 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
195 | unsigned int bsize = DES3_EDE_BLOCK_SIZE; | ||
196 | unsigned int nbytes = walk->nbytes; | ||
197 | u64 *src = (u64 *)walk->src.virt.addr; | ||
198 | u64 *dst = (u64 *)walk->dst.virt.addr; | ||
199 | u64 ivs[3 - 1]; | ||
200 | u64 last_iv; | ||
201 | |||
202 | /* Start of the last block. */ | ||
203 | src += nbytes / bsize - 1; | ||
204 | dst += nbytes / bsize - 1; | ||
205 | |||
206 | last_iv = *src; | ||
207 | |||
208 | /* Process three block batch */ | ||
209 | if (nbytes >= bsize * 3) { | ||
210 | do { | ||
211 | nbytes -= bsize * 3 - bsize; | ||
212 | src -= 3 - 1; | ||
213 | dst -= 3 - 1; | ||
214 | |||
215 | ivs[0] = src[0]; | ||
216 | ivs[1] = src[1]; | ||
217 | |||
218 | des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); | ||
219 | |||
220 | dst[1] ^= ivs[0]; | ||
221 | dst[2] ^= ivs[1]; | ||
222 | |||
223 | nbytes -= bsize; | ||
224 | if (nbytes < bsize) | ||
225 | goto done; | ||
226 | |||
227 | *dst ^= *(src - 1); | ||
228 | src -= 1; | ||
229 | dst -= 1; | ||
230 | } while (nbytes >= bsize * 3); | ||
231 | } | ||
232 | |||
233 | /* Handle leftovers */ | ||
234 | for (;;) { | ||
235 | des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src); | ||
236 | |||
237 | nbytes -= bsize; | ||
238 | if (nbytes < bsize) | ||
239 | break; | ||
240 | |||
241 | *dst ^= *(src - 1); | ||
242 | src -= 1; | ||
243 | dst -= 1; | ||
244 | } | ||
245 | |||
246 | done: | ||
247 | *dst ^= *(u64 *)walk->iv; | ||
248 | *(u64 *)walk->iv = last_iv; | ||
249 | |||
250 | return nbytes; | ||
251 | } | ||
252 | |||
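As a readability aid (not part of the patch), the backward walk above reduces to this straight-line reference model, assuming in-place operation on whole blocks only and no 3-way batching:

    /* Illustrative reference: CBC decryption back to front, so that
     * in-place operation works -- each ciphertext block is still intact
     * when it is needed as the previous block's chaining value:
     * P[i] = D(C[i]) ^ C[i-1], with C[-1] = IV. */
    static void cbc_dec_ref(struct des3_ede_x86_ctx *ctx, u64 *buf,
                            unsigned int nblocks, u64 *iv)
    {
            u64 last_iv = buf[nblocks - 1]; /* becomes the next call's IV */
            unsigned int i;

            for (i = nblocks - 1; i > 0; i--) {
                    des3_ede_dec_blk(ctx, (u8 *)&buf[i], (u8 *)&buf[i]);
                    buf[i] ^= buf[i - 1];
            }
            des3_ede_dec_blk(ctx, (u8 *)&buf[0], (u8 *)&buf[0]);
            buf[0] ^= *iv;
            *iv = last_iv;
    }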
253 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
254 | struct scatterlist *src, unsigned int nbytes) | ||
255 | { | ||
256 | struct blkcipher_walk walk; | ||
257 | int err; | ||
258 | |||
259 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
260 | err = blkcipher_walk_virt(desc, &walk); | ||
261 | |||
262 | while ((nbytes = walk.nbytes)) { | ||
263 | nbytes = __cbc_decrypt(desc, &walk); | ||
264 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
265 | } | ||
266 | |||
267 | return err; | ||
268 | } | ||
269 | |||
270 | static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx, | ||
271 | struct blkcipher_walk *walk) | ||
272 | { | ||
273 | u8 *ctrblk = walk->iv; | ||
274 | u8 keystream[DES3_EDE_BLOCK_SIZE]; | ||
275 | u8 *src = walk->src.virt.addr; | ||
276 | u8 *dst = walk->dst.virt.addr; | ||
277 | unsigned int nbytes = walk->nbytes; | ||
278 | |||
279 | des3_ede_enc_blk(ctx, keystream, ctrblk); | ||
280 | crypto_xor(keystream, src, nbytes); | ||
281 | memcpy(dst, keystream, nbytes); | ||
282 | |||
283 | crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE); | ||
284 | } | ||
285 | |||
286 | static unsigned int __ctr_crypt(struct blkcipher_desc *desc, | ||
287 | struct blkcipher_walk *walk) | ||
288 | { | ||
289 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
290 | unsigned int bsize = DES3_EDE_BLOCK_SIZE; | ||
291 | unsigned int nbytes = walk->nbytes; | ||
292 | __be64 *src = (__be64 *)walk->src.virt.addr; | ||
293 | __be64 *dst = (__be64 *)walk->dst.virt.addr; | ||
294 | u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); | ||
295 | __be64 ctrblocks[3]; | ||
296 | |||
297 | /* Process three block batch */ | ||
298 | if (nbytes >= bsize * 3) { | ||
299 | do { | ||
300 | /* create ctrblks for parallel encrypt */ | ||
301 | ctrblocks[0] = cpu_to_be64(ctrblk++); | ||
302 | ctrblocks[1] = cpu_to_be64(ctrblk++); | ||
303 | ctrblocks[2] = cpu_to_be64(ctrblk++); | ||
304 | |||
305 | des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks, | ||
306 | (u8 *)ctrblocks); | ||
307 | |||
308 | dst[0] = src[0] ^ ctrblocks[0]; | ||
309 | dst[1] = src[1] ^ ctrblocks[1]; | ||
310 | dst[2] = src[2] ^ ctrblocks[2]; | ||
311 | |||
312 | src += 3; | ||
313 | dst += 3; | ||
314 | } while ((nbytes -= bsize * 3) >= bsize * 3); | ||
315 | |||
316 | if (nbytes < bsize) | ||
317 | goto done; | ||
318 | } | ||
319 | |||
320 | /* Handle leftovers */ | ||
321 | do { | ||
322 | ctrblocks[0] = cpu_to_be64(ctrblk++); | ||
323 | |||
324 | des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); | ||
325 | |||
326 | dst[0] = src[0] ^ ctrblocks[0]; | ||
327 | |||
328 | src += 1; | ||
329 | dst += 1; | ||
330 | } while ((nbytes -= bsize) >= bsize); | ||
331 | |||
332 | done: | ||
333 | *(__be64 *)walk->iv = cpu_to_be64(ctrblk); | ||
334 | return nbytes; | ||
335 | } | ||
336 | |||
337 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
338 | struct scatterlist *src, unsigned int nbytes) | ||
339 | { | ||
340 | struct blkcipher_walk walk; | ||
341 | int err; | ||
342 | |||
343 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
344 | err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE); | ||
345 | |||
346 | while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) { | ||
347 | nbytes = __ctr_crypt(desc, &walk); | ||
348 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
349 | } | ||
350 | |||
351 | if (walk.nbytes) { | ||
352 | ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); | ||
353 | err = blkcipher_walk_done(desc, &walk, 0); | ||
354 | } | ||
355 | |||
356 | return err; | ||
357 | } | ||
358 | |||
359 | static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
360 | unsigned int keylen) | ||
361 | { | ||
362 | struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm); | ||
363 | u32 i, j, tmp; | ||
364 | int err; | ||
365 | |||
366 | /* Generate encryption context using generic implementation. */ | ||
367 | err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen); | ||
368 | if (err < 0) | ||
369 | return err; | ||
370 | |||
371 | /* Fix encryption context for this implementation and form the | ||
372 | * decryption context by reversing the round-key order. */ | ||
373 | j = DES3_EDE_EXPKEY_WORDS - 2; | ||
374 | for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) { | ||
375 | tmp = ror32(ctx->enc_expkey[i + 1], 4); | ||
376 | ctx->enc_expkey[i + 1] = tmp; | ||
377 | |||
378 | ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0]; | ||
379 | ctx->dec_expkey[j + 1] = tmp; | ||
380 | } | ||
381 | |||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | static struct crypto_alg des3_ede_algs[4] = { { | ||
386 | .cra_name = "des3_ede", | ||
387 | .cra_driver_name = "des3_ede-asm", | ||
388 | .cra_priority = 200, | ||
389 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | ||
390 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
391 | .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), | ||
392 | .cra_alignmask = 0, | ||
393 | .cra_module = THIS_MODULE, | ||
394 | .cra_u = { | ||
395 | .cipher = { | ||
396 | .cia_min_keysize = DES3_EDE_KEY_SIZE, | ||
397 | .cia_max_keysize = DES3_EDE_KEY_SIZE, | ||
398 | .cia_setkey = des3_ede_x86_setkey, | ||
399 | .cia_encrypt = des3_ede_x86_encrypt, | ||
400 | .cia_decrypt = des3_ede_x86_decrypt, | ||
401 | } | ||
402 | } | ||
403 | }, { | ||
404 | .cra_name = "ecb(des3_ede)", | ||
405 | .cra_driver_name = "ecb-des3_ede-asm", | ||
406 | .cra_priority = 300, | ||
407 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
408 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
409 | .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), | ||
410 | .cra_alignmask = 0, | ||
411 | .cra_type = &crypto_blkcipher_type, | ||
412 | .cra_module = THIS_MODULE, | ||
413 | .cra_u = { | ||
414 | .blkcipher = { | ||
415 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
416 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
417 | .setkey = des3_ede_x86_setkey, | ||
418 | .encrypt = ecb_encrypt, | ||
419 | .decrypt = ecb_decrypt, | ||
420 | }, | ||
421 | }, | ||
422 | }, { | ||
423 | .cra_name = "cbc(des3_ede)", | ||
424 | .cra_driver_name = "cbc-des3_ede-asm", | ||
425 | .cra_priority = 300, | ||
426 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
427 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
428 | .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), | ||
429 | .cra_alignmask = 0, | ||
430 | .cra_type = &crypto_blkcipher_type, | ||
431 | .cra_module = THIS_MODULE, | ||
432 | .cra_u = { | ||
433 | .blkcipher = { | ||
434 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
435 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
436 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
437 | .setkey = des3_ede_x86_setkey, | ||
438 | .encrypt = cbc_encrypt, | ||
439 | .decrypt = cbc_decrypt, | ||
440 | }, | ||
441 | }, | ||
442 | }, { | ||
443 | .cra_name = "ctr(des3_ede)", | ||
444 | .cra_driver_name = "ctr-des3_ede-asm", | ||
445 | .cra_priority = 300, | ||
446 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
447 | .cra_blocksize = 1, | ||
448 | .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), | ||
449 | .cra_alignmask = 0, | ||
450 | .cra_type = &crypto_blkcipher_type, | ||
451 | .cra_module = THIS_MODULE, | ||
452 | .cra_u = { | ||
453 | .blkcipher = { | ||
454 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
455 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
456 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
457 | .setkey = des3_ede_x86_setkey, | ||
458 | .encrypt = ctr_crypt, | ||
459 | .decrypt = ctr_crypt, | ||
460 | }, | ||
461 | }, | ||
462 | } }; | ||
463 | |||
464 | static bool is_blacklisted_cpu(void) | ||
465 | { | ||
466 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | ||
467 | return false; | ||
468 | |||
469 | if (boot_cpu_data.x86 == 0x0f) { | ||
470 | /* | ||
471 | * On Pentium 4, des3_ede-x86_64 is slower than the generic C | ||
472 | * implementation because of its use of 64-bit rotates (which are | ||
473 | * really slow on P4). Therefore blacklist P4s. | ||
474 | */ | ||
475 | return true; | ||
476 | } | ||
477 | |||
478 | return false; | ||
479 | } | ||
480 | |||
481 | static int force; | ||
482 | module_param(force, int, 0); | ||
483 | MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); | ||
484 | |||
485 | static int __init des3_ede_x86_init(void) | ||
486 | { | ||
487 | if (!force && is_blacklisted_cpu()) { | ||
488 | pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n"); | ||
489 | return -ENODEV; | ||
490 | } | ||
491 | |||
492 | return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); | ||
493 | } | ||
494 | |||
495 | static void __exit des3_ede_x86_fini(void) | ||
496 | { | ||
497 | crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); | ||
498 | } | ||
499 | |||
500 | module_init(des3_ede_x86_init); | ||
501 | module_exit(des3_ede_x86_fini); | ||
502 | |||
503 | MODULE_LICENSE("GPL"); | ||
504 | MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); | ||
505 | MODULE_ALIAS("des3_ede"); | ||
506 | MODULE_ALIAS("des3_ede-asm"); | ||
507 | MODULE_ALIAS("des"); | ||
508 | MODULE_ALIAS("des-asm"); | ||
509 | MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>"); | ||
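For context, a kernel caller of this era would reach the algorithms registered above through the synchronous blkcipher API; the sketch below is a hypothetical helper (not part of the patch) that would bind to cbc-des3_ede-asm by priority whenever this module is loaded, and fall back to the generic implementation otherwise:

    #include <crypto/des.h>
    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Encrypt len bytes of buf in place; len must be a multiple of
     * DES3_EDE_BLOCK_SIZE. Hypothetical helper for illustration. */
    static int cbc_3des_encrypt_buf(const u8 *key, const u8 *iv,
                                    u8 *buf, unsigned int len)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            desc.tfm = tfm;
            desc.flags = 0;

            err = crypto_blkcipher_setkey(tfm, key, DES3_EDE_KEY_SIZE);
            if (!err) {
                    crypto_blkcipher_set_iv(tfm, iv, DES3_EDE_BLOCK_SIZE);
                    sg_init_one(&sg, buf, len);
                    err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
            }

            crypto_free_blkcipher(tfm);
            return err;
    }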
diff --git a/crypto/Kconfig b/crypto/Kconfig index ce4012a58781..6345c470650d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -23,7 +23,8 @@ comment "Crypto core or helper" | |||
23 | 23 | ||
24 | config CRYPTO_FIPS | 24 | config CRYPTO_FIPS |
25 | bool "FIPS 200 compliance" | 25 | bool "FIPS 200 compliance" |
26 | depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS | 26 | depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS |
27 | depends on MODULE_SIG | ||
27 | help | 28 | help |
28 | This option enables the fips boot option which is | 29 | This option enables the fips boot option which is
29 | required if you want the system to operate in a FIPS 200 | 30 | required if you want the system to operate in a FIPS 200
@@ -1019,6 +1020,19 @@ config CRYPTO_DES_SPARC64 | |||
1019 | DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3), | 1020 | DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3), |
1020 | optimized using SPARC64 crypto opcodes. | 1021 | optimized using SPARC64 crypto opcodes. |
1021 | 1022 | ||
1023 | config CRYPTO_DES3_EDE_X86_64 | ||
1024 | tristate "Triple DES EDE cipher algorithm (x86-64)" | ||
1025 | depends on X86 && 64BIT | ||
1026 | select CRYPTO_ALGAPI | ||
1027 | select CRYPTO_DES | ||
1028 | help | ||
1029 | Triple DES EDE (FIPS 46-3) algorithm. | ||
1030 | |||
1031 | This module provides an implementation of the Triple DES EDE | ||
1032 | cipher algorithm that is optimized for x86-64 processors. Two | ||
1033 | versions of the algorithm are provided: one processing a single | ||
1034 | input block at a time and one processing three blocks in parallel. | ||
1035 | |||
1022 | config CRYPTO_FCRYPT | 1036 | config CRYPTO_FCRYPT |
1023 | tristate "FCrypt cipher algorithm" | 1037 | tristate "FCrypt cipher algorithm" |
1024 | select CRYPTO_ALGAPI | 1038 | select CRYPTO_ALGAPI |
@@ -1380,6 +1394,40 @@ config CRYPTO_ANSI_CPRNG | |||
1380 | ANSI X9.31 A.2.4. Note that this option must be enabled if | 1394 | ANSI X9.31 A.2.4. Note that this option must be enabled if |
1381 | CRYPTO_FIPS is selected | 1395 | CRYPTO_FIPS is selected |
1382 | 1396 | ||
1397 | menuconfig CRYPTO_DRBG_MENU | ||
1398 | tristate "NIST SP800-90A DRBG" | ||
1399 | help | ||
1400 | NIST SP800-90A compliant DRBG. In the following submenu, one or | ||
1401 | more of the DRBG types must be selected. | ||
1402 | |||
1403 | if CRYPTO_DRBG_MENU | ||
1404 | |||
1405 | config CRYPTO_DRBG_HMAC | ||
1406 | bool "Enable HMAC DRBG" | ||
1407 | default y | ||
1408 | select CRYPTO_HMAC | ||
1409 | help | ||
1410 | Enable the HMAC DRBG variant as defined in NIST SP800-90A. | ||
1411 | |||
1412 | config CRYPTO_DRBG_HASH | ||
1413 | bool "Enable Hash DRBG" | ||
1414 | select CRYPTO_HASH | ||
1415 | help | ||
1416 | Enable the Hash DRBG variant as defined in NIST SP800-90A. | ||
1417 | |||
1418 | config CRYPTO_DRBG_CTR | ||
1419 | bool "Enable CTR DRBG" | ||
1420 | select CRYPTO_AES | ||
1421 | help | ||
1422 | Enable the CTR DRBG variant as defined in NIST SP800-90A. | ||
1423 | |||
1424 | config CRYPTO_DRBG | ||
1425 | tristate | ||
1426 | default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR) | ||
1427 | select CRYPTO_RNG | ||
1428 | |||
1429 | endif # if CRYPTO_DRBG_MENU | ||
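The hidden CRYPTO_DRBG symbol above tracks the menu symbol: its default is the value of CRYPTO_DRBG_MENU whenever at least one variant is enabled, so selecting any one variant is enough to build drbg.o. A hypothetical .config fragment enabling only the HMAC flavor:

    CONFIG_CRYPTO_DRBG_MENU=y
    CONFIG_CRYPTO_DRBG_HMAC=y
    # CONFIG_CRYPTO_DRBG_HASH is not set
    # CONFIG_CRYPTO_DRBG_CTR is not set
    CONFIG_CRYPTO_DRBG=y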
1430 | |||
1383 | config CRYPTO_USER_API | 1431 | config CRYPTO_USER_API |
1384 | tristate | 1432 | tristate |
1385 | 1433 | ||
diff --git a/crypto/Makefile b/crypto/Makefile index 38e64231dcd3..cfa57b3f5a4d 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -92,6 +92,7 @@ obj-$(CONFIG_CRYPTO_842) += 842.o | |||
92 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o | 92 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o |
93 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o | 93 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o |
94 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o | 94 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o |
95 | obj-$(CONFIG_CRYPTO_DRBG) += drbg.o | ||
95 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o | 96 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o |
96 | obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o | 97 | obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o |
97 | obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o | 98 | obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o |
diff --git a/crypto/algapi.c b/crypto/algapi.c index 7a1ae87f1683..e8d3a7dca8c4 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -41,8 +41,20 @@ static inline int crypto_set_driver_name(struct crypto_alg *alg) | |||
41 | return 0; | 41 | return 0; |
42 | } | 42 | } |
43 | 43 | ||
44 | static inline void crypto_check_module_sig(struct module *mod) | ||
45 | { | ||
46 | #ifdef CONFIG_CRYPTO_FIPS | ||
47 | if (fips_enabled && mod && !mod->sig_ok) | ||
48 | panic("Module %s signature verification failed in FIPS mode\n", | ||
49 | mod->name); | ||
50 | #endif | ||
51 | return; | ||
52 | } | ||
53 | |||
44 | static int crypto_check_alg(struct crypto_alg *alg) | 54 | static int crypto_check_alg(struct crypto_alg *alg) |
45 | { | 55 | { |
56 | crypto_check_module_sig(alg->cra_module); | ||
57 | |||
46 | if (alg->cra_alignmask & (alg->cra_alignmask + 1)) | 58 | if (alg->cra_alignmask & (alg->cra_alignmask + 1)) |
47 | return -EINVAL; | 59 | return -EINVAL; |
48 | 60 | ||
@@ -430,6 +442,8 @@ int crypto_register_template(struct crypto_template *tmpl) | |||
430 | 442 | ||
431 | down_write(&crypto_alg_sem); | 443 | down_write(&crypto_alg_sem); |
432 | 444 | ||
445 | crypto_check_module_sig(tmpl->module); | ||
446 | |||
433 | list_for_each_entry(q, &crypto_template_list, list) { | 447 | list_for_each_entry(q, &crypto_template_list, list) { |
434 | if (q == tmpl) | 448 | if (q == tmpl) |
435 | goto out; | 449 | goto out; |
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 7bdd61b867c8..e592c90abebb 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -233,7 +233,7 @@ static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, | 235 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, |
236 | crypto_completion_t complete) | 236 | crypto_completion_t compl) |
237 | { | 237 | { |
238 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); | 238 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); |
239 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 239 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
@@ -241,7 +241,7 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, | |||
241 | 241 | ||
242 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); | 242 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); |
243 | rctx->complete = req->base.complete; | 243 | rctx->complete = req->base.complete; |
244 | req->base.complete = complete; | 244 | req->base.complete = compl; |
245 | 245 | ||
246 | return cryptd_enqueue_request(queue, &req->base); | 246 | return cryptd_enqueue_request(queue, &req->base); |
247 | } | 247 | } |
@@ -414,7 +414,7 @@ static int cryptd_hash_setkey(struct crypto_ahash *parent, | |||
414 | } | 414 | } |
415 | 415 | ||
416 | static int cryptd_hash_enqueue(struct ahash_request *req, | 416 | static int cryptd_hash_enqueue(struct ahash_request *req, |
417 | crypto_completion_t complete) | 417 | crypto_completion_t compl) |
418 | { | 418 | { |
419 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 419 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
420 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 420 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
@@ -422,7 +422,7 @@ static int cryptd_hash_enqueue(struct ahash_request *req, | |||
422 | cryptd_get_queue(crypto_ahash_tfm(tfm)); | 422 | cryptd_get_queue(crypto_ahash_tfm(tfm)); |
423 | 423 | ||
424 | rctx->complete = req->base.complete; | 424 | rctx->complete = req->base.complete; |
425 | req->base.complete = complete; | 425 | req->base.complete = compl; |
426 | 426 | ||
427 | return cryptd_enqueue_request(queue, &req->base); | 427 | return cryptd_enqueue_request(queue, &req->base); |
428 | } | 428 | } |
@@ -667,14 +667,14 @@ static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) | |||
667 | } | 667 | } |
668 | 668 | ||
669 | static int cryptd_aead_enqueue(struct aead_request *req, | 669 | static int cryptd_aead_enqueue(struct aead_request *req, |
670 | crypto_completion_t complete) | 670 | crypto_completion_t compl) |
671 | { | 671 | { |
672 | struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); | 672 | struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); |
673 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 673 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
674 | struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); | 674 | struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); |
675 | 675 | ||
676 | rctx->complete = req->base.complete; | 676 | rctx->complete = req->base.complete; |
677 | req->base.complete = complete; | 677 | req->base.complete = compl; |
678 | return cryptd_enqueue_request(queue, &req->base); | 678 | return cryptd_enqueue_request(queue, &req->base); |
679 | } | 679 | } |
680 | 680 | ||
diff --git a/crypto/des_generic.c b/crypto/des_generic.c index f6cf63f88468..298d464ab7d2 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c | |||
@@ -859,13 +859,10 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
859 | * property. | 859 | * property. |
860 | * | 860 | * |
861 | */ | 861 | */ |
862 | static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, | 862 | int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, |
863 | unsigned int keylen) | 863 | unsigned int keylen) |
864 | { | 864 | { |
865 | const u32 *K = (const u32 *)key; | 865 | const u32 *K = (const u32 *)key; |
866 | struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); | ||
867 | u32 *expkey = dctx->expkey; | ||
868 | u32 *flags = &tfm->crt_flags; | ||
869 | 866 | ||
870 | if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || | 867 | if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || |
871 | !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && | 868 | !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && |
@@ -880,6 +877,17 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
880 | 877 | ||
881 | return 0; | 878 | return 0; |
882 | } | 879 | } |
880 | EXPORT_SYMBOL_GPL(__des3_ede_setkey); | ||
881 | |||
882 | static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
883 | unsigned int keylen) | ||
884 | { | ||
885 | struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); | ||
886 | u32 *flags = &tfm->crt_flags; | ||
887 | u32 *expkey = dctx->expkey; | ||
888 | |||
889 | return __des3_ede_setkey(expkey, flags, key, keylen); | ||
890 | } | ||
883 | 891 | ||
884 | static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | 892 | static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) |
885 | { | 893 | { |
@@ -945,6 +953,8 @@ static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
945 | 953 | ||
946 | static struct crypto_alg des_algs[2] = { { | 954 | static struct crypto_alg des_algs[2] = { { |
947 | .cra_name = "des", | 955 | .cra_name = "des", |
956 | .cra_driver_name = "des-generic", | ||
957 | .cra_priority = 100, | ||
948 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 958 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
949 | .cra_blocksize = DES_BLOCK_SIZE, | 959 | .cra_blocksize = DES_BLOCK_SIZE, |
950 | .cra_ctxsize = sizeof(struct des_ctx), | 960 | .cra_ctxsize = sizeof(struct des_ctx), |
@@ -958,6 +968,8 @@ static struct crypto_alg des_algs[2] = { { | |||
958 | .cia_decrypt = des_decrypt } } | 968 | .cia_decrypt = des_decrypt } } |
959 | }, { | 969 | }, { |
960 | .cra_name = "des3_ede", | 970 | .cra_name = "des3_ede", |
971 | .cra_driver_name = "des3_ede-generic", | ||
972 | .cra_priority = 100, | ||
961 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 973 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
962 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 974 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
963 | .cra_ctxsize = sizeof(struct des3_ede_ctx), | 975 | .cra_ctxsize = sizeof(struct des3_ede_ctx), |
diff --git a/crypto/drbg.c b/crypto/drbg.c new file mode 100644 index 000000000000..7894db9ca90b --- /dev/null +++ b/crypto/drbg.c | |||
@@ -0,0 +1,2044 @@ | |||
1 | /* | ||
2 | * DRBG: Deterministic Random Bits Generator | ||
3 | * Based on NIST Recommended DRBG from NIST SP800-90A with the following | ||
4 | * properties: | ||
5 | * * CTR DRBG with DF with AES-128, AES-192, AES-256 cores | ||
6 | * * Hash DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores | ||
7 | * * HMAC DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores | ||
8 | * * with and without prediction resistance | ||
9 | * | ||
10 | * Copyright Stephan Mueller <smueller@chronox.de>, 2014 | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or without | ||
13 | * modification, are permitted provided that the following conditions | ||
14 | * are met: | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, and the entire permission notice in its entirety, | ||
17 | * including the disclaimer of warranties. | ||
18 | * 2. Redistributions in binary form must reproduce the above copyright | ||
19 | * notice, this list of conditions and the following disclaimer in the | ||
20 | * documentation and/or other materials provided with the distribution. | ||
21 | * 3. The name of the author may not be used to endorse or promote | ||
22 | * products derived from this software without specific prior | ||
23 | * written permission. | ||
24 | * | ||
25 | * ALTERNATIVELY, this product may be distributed under the terms of | ||
26 | * the GNU General Public License, in which case the provisions of the GPL are | ||
27 | * required INSTEAD OF the above restrictions. (This clause is | ||
28 | * necessary due to a potential bad interaction between the GPL and | ||
29 | * the restrictions contained in a BSD-style copyright.) | ||
30 | * | ||
31 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
32 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | ||
33 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF | ||
34 | * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE | ||
35 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
36 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT | ||
37 | * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
38 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
39 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
40 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
41 | * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH | ||
42 | * DAMAGE. | ||
43 | * | ||
44 | * DRBG Usage | ||
45 | * ========== | ||
46 | * The SP 800-90A DRBG allows the user to specify a personalization string | ||
47 | * for initialization as well as an additional information string for each | ||
48 | * random number request. The following code fragments show how a caller | ||
49 | * uses the kernel crypto API to use the full functionality of the DRBG. | ||
50 | * | ||
51 | * Usage without any additional data | ||
52 | * --------------------------------- | ||
53 | * struct crypto_rng *drng; | ||
54 | * int err; | ||
55 | * char data[DATALEN]; | ||
56 | * | ||
57 | * drng = crypto_alloc_rng(drng_name, 0, 0); | ||
58 | * err = crypto_rng_get_bytes(drng, &data, DATALEN); | ||
59 | * crypto_free_rng(drng); | ||
60 | * | ||
61 | * | ||
62 | * Usage with personalization string during initialization | ||
63 | * ------------------------------------------------------- | ||
64 | * struct crypto_rng *drng; | ||
65 | * int err; | ||
66 | * char data[DATALEN]; | ||
67 | * char personalization[11] = "some-string"; | ||
68 | * | ||
69 | * drng = crypto_alloc_rng(drng_name, 0, 0); | ||
70 | * // The reset completely re-initializes the DRBG with the provided | ||
71 | * // personalization string. crypto_rng_reset() takes the raw byte | ||
72 | * // string directly; no drbg_string wrapper is needed here. | ||
73 | * err = crypto_rng_reset(drng, personalization, | ||
74 | * strlen(personalization)); | ||
75 | * err = crypto_rng_get_bytes(drng, &data, DATALEN); | ||
76 | * crypto_free_rng(drng); | ||
77 | * | ||
78 | * | ||
79 | * Usage with additional information string during random number request | ||
80 | * --------------------------------------------------------------------- | ||
81 | * struct crypto_rng *drng; | ||
82 | * int err; | ||
83 | * char data[DATALEN]; | ||
84 | * char addtl_string[11] = "some-string"; | ||
85 | * struct drbg_string addtl; | ||
86 | * | ||
87 | * drbg_string_fill(&addtl, addtl_string, strlen(addtl_string)); | ||
88 | * drng = crypto_alloc_rng(drng_name, 0, 0); | ||
89 | * // The following call is a wrapper to crypto_rng_get_bytes() and returns | ||
90 | * // the same error codes. | ||
91 | * err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl); | ||
92 | * crypto_free_rng(drng); | ||
93 | * | ||
94 | * | ||
95 | * Usage with personalization and additional information strings | ||
96 | * ------------------------------------------------------------- | ||
97 | * Just mix both scenarios above. | ||
98 | */ | ||
99 | |||
100 | #include <crypto/drbg.h> | ||
101 | |||
102 | /*************************************************************** | ||
103 | * Backend cipher definitions available to DRBG | ||
104 | ***************************************************************/ | ||
105 | |||
106 | /* | ||
107 | * The order of the DRBG definitions here matter: every DRBG is registered | ||
108 | * as stdrng. Each DRBG receives an increasing cra_priority values the later | ||
109 | * they are defined in this array (see drbg_fill_array). | ||
110 | * | ||
111 | * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and | ||
112 | * the SHA256 / AES 256 over other ciphers. Thus, the favored | ||
113 | * DRBGs are the latest entries in this array. | ||
114 | */ | ||
115 | static const struct drbg_core drbg_cores[] = { | ||
116 | #ifdef CONFIG_CRYPTO_DRBG_CTR | ||
117 | { | ||
118 | .flags = DRBG_CTR | DRBG_STRENGTH128, | ||
119 | .statelen = 32, /* 256 bits as defined in 10.2.1 */ | ||
120 | .max_addtllen = 35, | ||
121 | .max_bits = 19, | ||
122 | .max_req = 48, | ||
123 | .blocklen_bytes = 16, | ||
124 | .cra_name = "ctr_aes128", | ||
125 | .backend_cra_name = "ecb(aes)", | ||
126 | }, { | ||
127 | .flags = DRBG_CTR | DRBG_STRENGTH192, | ||
128 | .statelen = 40, /* 320 bits as defined in 10.2.1 */ | ||
129 | .max_addtllen = 35, | ||
130 | .max_bits = 19, | ||
131 | .max_req = 48, | ||
132 | .blocklen_bytes = 16, | ||
133 | .cra_name = "ctr_aes192", | ||
134 | .backend_cra_name = "ecb(aes)", | ||
135 | }, { | ||
136 | .flags = DRBG_CTR | DRBG_STRENGTH256, | ||
137 | .statelen = 48, /* 384 bits as defined in 10.2.1 */ | ||
138 | .max_addtllen = 35, | ||
139 | .max_bits = 19, | ||
140 | .max_req = 48, | ||
141 | .blocklen_bytes = 16, | ||
142 | .cra_name = "ctr_aes256", | ||
143 | .backend_cra_name = "ecb(aes)", | ||
144 | }, | ||
145 | #endif /* CONFIG_CRYPTO_DRBG_CTR */ | ||
146 | #ifdef CONFIG_CRYPTO_DRBG_HASH | ||
147 | { | ||
148 | .flags = DRBG_HASH | DRBG_STRENGTH128, | ||
149 | .statelen = 55, /* 440 bits */ | ||
150 | .max_addtllen = 35, | ||
151 | .max_bits = 19, | ||
152 | .max_req = 48, | ||
153 | .blocklen_bytes = 20, | ||
154 | .cra_name = "sha1", | ||
155 | .backend_cra_name = "sha1", | ||
156 | }, { | ||
157 | .flags = DRBG_HASH | DRBG_STRENGTH256, | ||
158 | .statelen = 111, /* 888 bits */ | ||
159 | .max_addtllen = 35, | ||
160 | .max_bits = 19, | ||
161 | .max_req = 48, | ||
162 | .blocklen_bytes = 48, | ||
163 | .cra_name = "sha384", | ||
164 | .backend_cra_name = "sha384", | ||
165 | }, { | ||
166 | .flags = DRBG_HASH | DRBG_STRENGTH256, | ||
167 | .statelen = 111, /* 888 bits */ | ||
168 | .max_addtllen = 35, | ||
169 | .max_bits = 19, | ||
170 | .max_req = 48, | ||
171 | .blocklen_bytes = 64, | ||
172 | .cra_name = "sha512", | ||
173 | .backend_cra_name = "sha512", | ||
174 | }, { | ||
175 | .flags = DRBG_HASH | DRBG_STRENGTH256, | ||
176 | .statelen = 55, /* 440 bits */ | ||
177 | .max_addtllen = 35, | ||
178 | .max_bits = 19, | ||
179 | .max_req = 48, | ||
180 | .blocklen_bytes = 32, | ||
181 | .cra_name = "sha256", | ||
182 | .backend_cra_name = "sha256", | ||
183 | }, | ||
184 | #endif /* CONFIG_CRYPTO_DRBG_HASH */ | ||
185 | #ifdef CONFIG_CRYPTO_DRBG_HMAC | ||
186 | { | ||
187 | .flags = DRBG_HMAC | DRBG_STRENGTH128, | ||
188 | .statelen = 20, /* block length of cipher */ | ||
189 | .max_addtllen = 35, | ||
190 | .max_bits = 19, | ||
191 | .max_req = 48, | ||
192 | .blocklen_bytes = 20, | ||
193 | .cra_name = "hmac_sha1", | ||
194 | .backend_cra_name = "hmac(sha1)", | ||
195 | }, { | ||
196 | .flags = DRBG_HMAC | DRBG_STRENGTH256, | ||
197 | .statelen = 48, /* block length of cipher */ | ||
198 | .max_addtllen = 35, | ||
199 | .max_bits = 19, | ||
200 | .max_req = 48, | ||
201 | .blocklen_bytes = 48, | ||
202 | .cra_name = "hmac_sha384", | ||
203 | .backend_cra_name = "hmac(sha384)", | ||
204 | }, { | ||
205 | .flags = DRBG_HMAC | DRBG_STRENGTH256, | ||
206 | .statelen = 64, /* block length of cipher */ | ||
207 | .max_addtllen = 35, | ||
208 | .max_bits = 19, | ||
209 | .max_req = 48, | ||
210 | .blocklen_bytes = 64, | ||
211 | .cra_name = "hmac_sha512", | ||
212 | .backend_cra_name = "hmac(sha512)", | ||
213 | }, { | ||
214 | .flags = DRBG_HMAC | DRBG_STRENGTH256, | ||
215 | .statelen = 32, /* block length of cipher */ | ||
216 | .max_addtllen = 35, | ||
217 | .max_bits = 19, | ||
218 | .max_req = 48, | ||
219 | .blocklen_bytes = 32, | ||
220 | .cra_name = "hmac_sha256", | ||
221 | .backend_cra_name = "hmac(sha256)", | ||
222 | }, | ||
223 | #endif /* CONFIG_CRYPTO_DRBG_HMAC */ | ||
224 | }; | ||
225 | |||
226 | /****************************************************************** | ||
227 | * Generic helper functions | ||
228 | ******************************************************************/ | ||
229 | |||
230 | /* | ||
231 | * Return strength of DRBG according to SP800-90A section 8.4 | ||
232 | * | ||
233 | * @flags DRBG flags reference | ||
234 | * | ||
235 | * Return: normalized strength value in *bytes*, or 32 as a default | ||
236 | * to counter programming errors | ||
237 | */ | ||
238 | static inline unsigned short drbg_sec_strength(drbg_flag_t flags) | ||
239 | { | ||
240 | switch (flags & DRBG_STRENGTH_MASK) { | ||
241 | case DRBG_STRENGTH128: | ||
242 | return 16; | ||
243 | case DRBG_STRENGTH192: | ||
244 | return 24; | ||
245 | case DRBG_STRENGTH256: | ||
246 | return 32; | ||
247 | default: | ||
248 | return 32; | ||
249 | } | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * FIPS 140-2 continuous self test | ||
254 | * The test is performed on the result of one round of the output | ||
255 | * function. Thus, the function implicitly knows the size of the | ||
256 | * buffer. | ||
257 | * | ||
258 | * The FIPS test can be called in an endless loop until it returns | ||
259 | * true. Although the code looks like a potential deadlock, it is | ||
260 | * not, because a false return cannot mathematically occur (except | ||
261 | * once when a reseed took place and the updated state is now set up | ||
262 | * such that the generation of a new value returns an identical one | ||
263 | * -- this is most unlikely and would happen only once). | ||
264 | * Thus, if this function repeatedly returned false and thus caused | ||
265 | * a deadlock, the integrity of the entire kernel would be lost. | ||
266 | * | ||
267 | * @drbg DRBG handle | ||
268 | * @buf output buffer of random data to be checked | ||
269 | * | ||
270 | * return: | ||
271 | * true on success | ||
272 | * false on error | ||
273 | */ | ||
274 | static bool drbg_fips_continuous_test(struct drbg_state *drbg, | ||
275 | const unsigned char *buf) | ||
276 | { | ||
277 | #ifdef CONFIG_CRYPTO_FIPS | ||
278 | int ret = 0; | ||
279 | /* skip test if we test the overall system */ | ||
280 | if (drbg->test_data) | ||
281 | return true; | ||
282 | /* only perform test in FIPS mode */ | ||
283 | if (0 == fips_enabled) | ||
284 | return true; | ||
285 | if (!drbg->fips_primed) { | ||
286 | /* Priming of FIPS test */ | ||
287 | memcpy(drbg->prev, buf, drbg_blocklen(drbg)); | ||
288 | drbg->fips_primed = true; | ||
289 | /* return false due to priming, i.e. another round is needed */ | ||
290 | return false; | ||
291 | } | ||
292 | ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg)); | ||
293 | memcpy(drbg->prev, buf, drbg_blocklen(drbg)); | ||
294 | /* the test shall pass when the two compared values are not equal */ | ||
295 | return ret != 0; | ||
296 | #else | ||
297 | return true; | ||
298 | #endif /* CONFIG_CRYPTO_FIPS */ | ||
299 | } | ||
300 | |||
301 | /* | ||
302 | * Convert an integer into its byte representation. | ||
303 | * The byte representation is big-endian. | ||
304 | * | ||
305 | * @buf buffer holding the converted integer | ||
306 | * @val value to be converted | ||
307 | * @buflen length of buffer | ||
308 | */ | ||
309 | #if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR)) | ||
310 | static inline void drbg_int2byte(unsigned char *buf, uint64_t val, | ||
311 | size_t buflen) | ||
312 | { | ||
313 | unsigned char *byte; | ||
314 | uint64_t i; | ||
315 | |||
316 | byte = buf + (buflen - 1); | ||
317 | for (i = 0; i < buflen; i++) | ||
318 | *(byte--) = val >> (i * 8) & 0xff; | ||
319 | } | ||
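
/*
 * Example (illustrative values): drbg_int2byte(buf, 0x0102, 4) fills
 * buf with the big-endian representation { 0x00, 0x00, 0x01, 0x02 }.
 */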
320 | |||
321 | /* | ||
322 | * Add two big-endian buffers with carry propagation: dst = dst + add | ||
323 | * | ||
324 | * @dst buffer to add to (length dstlen, implied: dstlen > addlen) | ||
325 | * @add value to add (length addlen) | ||
326 | */ | ||
327 | static inline void drbg_add_buf(unsigned char *dst, size_t dstlen, | ||
328 | const unsigned char *add, size_t addlen) | ||
329 | { | ||
330 | /* implied: dstlen > addlen */ | ||
331 | unsigned char *dstptr; | ||
332 | const unsigned char *addptr; | ||
333 | unsigned int remainder = 0; | ||
334 | size_t len = addlen; | ||
335 | |||
336 | dstptr = dst + (dstlen-1); | ||
337 | addptr = add + (addlen-1); | ||
338 | while (len) { | ||
339 | remainder += *dstptr + *addptr; | ||
340 | *dstptr = remainder & 0xff; | ||
341 | remainder >>= 8; | ||
342 | len--; dstptr--; addptr--; | ||
343 | } | ||
344 | len = dstlen - addlen; | ||
345 | while (len && remainder > 0) { | ||
346 | remainder = *dstptr + 1; | ||
347 | *dstptr = remainder & 0xff; | ||
348 | remainder >>= 8; | ||
349 | len--; dstptr--; | ||
350 | } | ||
351 | } | ||
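
/*
 * Example (illustrative values): with dst = { 0x00, 0xff } and
 * add = { 0x01 }, the carry propagates and dst becomes { 0x01, 0x00 }.
 * The callers below also use this helper as a plain increment by
 * passing a one-byte buffer holding 0x01.
 */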
352 | #endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */ | ||
353 | |||
354 | /****************************************************************** | ||
355 | * CTR DRBG callback functions | ||
356 | ******************************************************************/ | ||
357 | |||
358 | #ifdef CONFIG_CRYPTO_DRBG_CTR | ||
359 | #define CRYPTO_DRBG_CTR_STRING "CTR " | ||
360 | static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, | ||
361 | unsigned char *outval, const struct drbg_string *in); | ||
362 | static int drbg_init_sym_kernel(struct drbg_state *drbg); | ||
363 | static int drbg_fini_sym_kernel(struct drbg_state *drbg); | ||
364 | |||
365 | /* BCC function for CTR DRBG as defined in 10.4.3 */ | ||
366 | static int drbg_ctr_bcc(struct drbg_state *drbg, | ||
367 | unsigned char *out, const unsigned char *key, | ||
368 | struct list_head *in) | ||
369 | { | ||
370 | int ret = 0; | ||
371 | struct drbg_string *curr = NULL; | ||
372 | struct drbg_string data; | ||
373 | short cnt = 0; | ||
374 | |||
375 | drbg_string_fill(&data, out, drbg_blocklen(drbg)); | ||
376 | |||
377 | /* 10.4.3 step 1 */ | ||
378 | memset(out, 0, drbg_blocklen(drbg)); | ||
379 | |||
380 | /* 10.4.3 step 2 / 4 */ | ||
381 | list_for_each_entry(curr, in, list) { | ||
382 | const unsigned char *pos = curr->buf; | ||
383 | size_t len = curr->len; | ||
384 | /* 10.4.3 step 4.1 */ | ||
385 | while (len) { | ||
386 | /* 10.4.3 step 4.2 */ | ||
387 | if (drbg_blocklen(drbg) == cnt) { | ||
388 | cnt = 0; | ||
389 | ret = drbg_kcapi_sym(drbg, key, out, &data); | ||
390 | if (ret) | ||
391 | return ret; | ||
392 | } | ||
393 | out[cnt] ^= *pos; | ||
394 | pos++; | ||
395 | cnt++; | ||
396 | len--; | ||
397 | } | ||
398 | } | ||
399 | /* 10.4.3 step 4.2 for last block */ | ||
400 | if (cnt) | ||
401 | ret = drbg_kcapi_sym(drbg, key, out, &data); | ||
402 | |||
403 | return ret; | ||
404 | } | ||
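
/*
 * In effect (sketch), the BCC operation is a CBC-MAC with a zero IV
 * computed over the concatenation of all list entries:
 *	out_0 = 0x00...00; out_i = Enc_key(out_(i-1) XOR block_i)
 * where the final out_i value is returned as the MAC.
 */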
405 | |||
406 | /* | ||
407 | * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df | ||
408 | * (and drbg_ctr_bcc, but this function does not need any temporary buffers), | ||
409 | * the scratchpad is used as follows: | ||
410 | * drbg_ctr_update: | ||
411 | * temp | ||
412 | * start: drbg->scratchpad | ||
413 | * length: drbg_statelen(drbg) + drbg_blocklen(drbg) | ||
414 | * note: the cipher writing into this variable works | ||
415 | * blocklen-wise. Now, when the statelen is not a multiple | ||
416 | * of blocklen, the generation loop below "spills over" | ||
417 | * by at most blocklen. Thus, we need to give sufficient | ||
418 | * memory. | ||
419 | * df_data | ||
420 | * start: drbg->scratchpad + | ||
421 | * drbg_statelen(drbg) + drbg_blocklen(drbg) | ||
422 | * length: drbg_statelen(drbg) | ||
423 | * | ||
424 | * drbg_ctr_df: | ||
425 | * pad | ||
426 | * start: df_data + drbg_statelen(drbg) | ||
427 | * length: drbg_blocklen(drbg) | ||
428 | * iv | ||
429 | * start: pad + drbg_blocklen(drbg) | ||
430 | * length: drbg_blocklen(drbg) | ||
431 | * temp | ||
432 | * start: iv + drbg_blocklen(drbg) | ||
433 | * length: drbg_statelen(drbg) + drbg_blocklen(drbg) | ||
434 | * note: temp is the buffer that the BCC function operates | ||
435 | * on. BCC operates blockwise. drbg_statelen(drbg) | ||
436 | * is sufficient when the DRBG state length is a multiple | ||
437 | * of the block size. For AES192 (and maybe other ciphers) | ||
438 | * this is not correct and the length for temp is | ||
439 | * insufficient (yes, that also means for such ciphers, | ||
440 | * the final output of all BCC rounds is truncated). | ||
441 | * Therefore, add drbg_blocklen(drbg) to cover all | ||
442 | * possibilities. | ||
443 | */ | ||
444 | |||
445 | /* Derivation Function for CTR DRBG as defined in 10.4.2 */ | ||
446 | static int drbg_ctr_df(struct drbg_state *drbg, | ||
447 | unsigned char *df_data, size_t bytes_to_return, | ||
448 | struct list_head *seedlist) | ||
449 | { | ||
450 | int ret = -EFAULT; | ||
451 | unsigned char L_N[8]; | ||
452 | /* S3 is input */ | ||
453 | struct drbg_string S1, S2, S4, cipherin; | ||
454 | LIST_HEAD(bcc_list); | ||
455 | unsigned char *pad = df_data + drbg_statelen(drbg); | ||
456 | unsigned char *iv = pad + drbg_blocklen(drbg); | ||
457 | unsigned char *temp = iv + drbg_blocklen(drbg); | ||
458 | size_t padlen = 0; | ||
459 | unsigned int templen = 0; | ||
460 | /* 10.4.2 step 7 */ | ||
461 | unsigned int i = 0; | ||
462 | /* 10.4.2 step 8 */ | ||
463 | const unsigned char *K = (unsigned char *) | ||
464 | "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
465 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
466 | "\x10\x11\x12\x13\x14\x15\x16\x17" | ||
467 | "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"; | ||
468 | unsigned char *X; | ||
469 | size_t generated_len = 0; | ||
470 | size_t inputlen = 0; | ||
471 | struct drbg_string *seed = NULL; | ||
472 | |||
473 | memset(pad, 0, drbg_blocklen(drbg)); | ||
474 | memset(iv, 0, drbg_blocklen(drbg)); | ||
475 | memset(temp, 0, drbg_statelen(drbg)); | ||
476 | |||
477 | /* 10.4.2 step 1 is implicit as we work byte-wise */ | ||
478 | |||
479 | /* 10.4.2 step 2 */ | ||
480 | if ((512/8) < bytes_to_return) | ||
481 | return -EINVAL; | ||
482 | |||
483 | /* 10.4.2 step 2 -- calculate the entire length of all input data */ | ||
484 | list_for_each_entry(seed, seedlist, list) | ||
485 | inputlen += seed->len; | ||
486 | drbg_int2byte(&L_N[0], inputlen, 4); | ||
487 | |||
488 | /* 10.4.2 step 3 */ | ||
489 | drbg_int2byte(&L_N[4], bytes_to_return, 4); | ||
490 | |||
491 | /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */ | ||
492 | padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg)); | ||
493 | /* wrap the padlen appropriately */ | ||
494 | if (padlen) | ||
495 | padlen = drbg_blocklen(drbg) - padlen; | ||
496 | /* | ||
497 | * pad / padlen contains the 0x80 byte and the following zero bytes. | ||
498 | * As the calculated padlen value only covers the number of zero | ||
499 | * bytes, this value has to be incremented by one for the 0x80 byte. | ||
500 | */ | ||
501 | padlen++; | ||
502 | pad[0] = 0x80; | ||
503 | |||
504 | /* 10.4.2 step 4 -- first fill the linked list and then order it */ | ||
505 | drbg_string_fill(&S1, iv, drbg_blocklen(drbg)); | ||
506 | list_add_tail(&S1.list, &bcc_list); | ||
507 | drbg_string_fill(&S2, L_N, sizeof(L_N)); | ||
508 | list_add_tail(&S2.list, &bcc_list); | ||
509 | list_splice_tail(seedlist, &bcc_list); | ||
510 | drbg_string_fill(&S4, pad, padlen); | ||
511 | list_add_tail(&S4.list, &bcc_list); | ||
512 | |||
513 | /* 10.4.2 step 9 */ | ||
514 | while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) { | ||
515 | /* | ||
516 | * 10.4.2 step 9.1 - the padding is implicit as the buffer | ||
517 | * holds zeros after allocation -- even the increment of i | ||
518 | * is irrelevant as the increment remains within the length of i | ||
519 | */ | ||
520 | drbg_int2byte(iv, i, 4); | ||
521 | /* 10.4.2 step 9.2 -- BCC and concatenation with temp */ | ||
522 | ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list); | ||
523 | if (ret) | ||
524 | goto out; | ||
525 | /* 10.4.2 step 9.3 */ | ||
526 | i++; | ||
527 | templen += drbg_blocklen(drbg); | ||
528 | } | ||
529 | |||
530 | /* 10.4.2 step 11 */ | ||
531 | X = temp + (drbg_keylen(drbg)); | ||
532 | drbg_string_fill(&cipherin, X, drbg_blocklen(drbg)); | ||
533 | |||
534 | /* 10.4.2 step 12: overwriting of outval is implemented in next step */ | ||
535 | |||
536 | /* 10.4.2 step 13 */ | ||
537 | while (generated_len < bytes_to_return) { | ||
538 | short blocklen = 0; | ||
539 | /* | ||
540 | * 10.4.2 step 13.1: the truncation of the key length is | ||
541 | * implicit as the key is only drbg_blocklen in size based on | ||
542 | * the implementation of the cipher function callback | ||
543 | */ | ||
544 | ret = drbg_kcapi_sym(drbg, temp, X, &cipherin); | ||
545 | if (ret) | ||
546 | goto out; | ||
547 | blocklen = (drbg_blocklen(drbg) < | ||
548 | (bytes_to_return - generated_len)) ? | ||
549 | drbg_blocklen(drbg) : | ||
550 | (bytes_to_return - generated_len); | ||
551 | /* 10.4.2 step 13.2 and 14 */ | ||
552 | memcpy(df_data + generated_len, X, blocklen); | ||
553 | generated_len += blocklen; | ||
554 | } | ||
555 | |||
556 | ret = 0; | ||
557 | |||
558 | out: | ||
559 | memset(iv, 0, drbg_blocklen(drbg)); | ||
560 | memset(temp, 0, drbg_statelen(drbg)); | ||
561 | memset(pad, 0, drbg_blocklen(drbg)); | ||
562 | return ret; | ||
563 | } | ||
564 | |||
565 | /* | ||
566 | * update function of CTR DRBG as defined in 10.2.1.2 | ||
567 | * | ||
568 | * The reseed variable has an enhanced meaning compared to the update | ||
569 | * functions of the other DRBGs as follows: | ||
570 | * 0 => initial seed from initialization | ||
571 | * 1 => reseed via drbg_seed | ||
572 | * 2 => first invocation from drbg_ctr_generate when addtl is present. In | ||
573 | * this case, the df_data scratchpad is not deleted so that it is | ||
574 | * available for subsequent calls to prevent calling the DF function | ||
575 | * again. | ||
576 | * 3 => second invocation from drbg_ctr_generate. When the update function | ||
577 | * was called with addtl, the df_data memory already contains the | ||
578 | * DFed addtl information and we do not need to call DF again. | ||
579 | */ | ||
580 | static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, | ||
581 | int reseed) | ||
582 | { | ||
583 | int ret = -EFAULT; | ||
584 | /* 10.2.1.2 step 1 */ | ||
585 | unsigned char *temp = drbg->scratchpad; | ||
586 | unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) + | ||
587 | drbg_blocklen(drbg); | ||
588 | unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */ | ||
589 | unsigned int len = 0; | ||
590 | struct drbg_string cipherin; | ||
591 | unsigned char prefix = DRBG_PREFIX1; | ||
592 | |||
593 | memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); | ||
594 | if (3 > reseed) | ||
595 | memset(df_data, 0, drbg_statelen(drbg)); | ||
596 | |||
597 | /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */ | ||
598 | if (seed) { | ||
599 | ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed); | ||
600 | if (ret) | ||
601 | goto out; | ||
602 | } | ||
603 | |||
604 | drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg)); | ||
605 | /* | ||
606 | * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation | ||
607 | * zeroizes all memory during initialization | ||
608 | */ | ||
609 | while (len < (drbg_statelen(drbg))) { | ||
610 | /* 10.2.1.2 step 2.1 */ | ||
611 | drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1); | ||
612 | /* 10.2.1.2 step 2.2 */ | ||
614 | ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin); | ||
615 | if (ret) | ||
616 | goto out; | ||
617 | /* 10.2.1.2 step 2.3 and 3 */ | ||
618 | len += drbg_blocklen(drbg); | ||
619 | } | ||
620 | |||
621 | /* 10.2.1.2 step 4 */ | ||
622 | temp_p = temp; | ||
623 | df_data_p = df_data; | ||
624 | for (len = 0; len < drbg_statelen(drbg); len++) { | ||
625 | *temp_p ^= *df_data_p; | ||
626 | df_data_p++; temp_p++; | ||
627 | } | ||
628 | |||
629 | /* 10.2.1.2 step 5 */ | ||
630 | memcpy(drbg->C, temp, drbg_keylen(drbg)); | ||
631 | /* 10.2.1.2 step 6 */ | ||
632 | memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg)); | ||
633 | ret = 0; | ||
634 | |||
635 | out: | ||
636 | memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); | ||
637 | if (2 != reseed) | ||
638 | memset(df_data, 0, drbg_statelen(drbg)); | ||
639 | return ret; | ||
640 | } | ||
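
/*
 * In short (sketch of 10.2.1.2): temp = Enc_C(V+1) || Enc_C(V+2) || ...
 * is truncated to statelen bytes and XORed with the DFed seed material;
 * the first keylen bytes of temp become the new key C, the trailing
 * blocklen bytes become the new V.
 */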
641 | |||
642 | /* | ||
643 | * scratchpad use: drbg_ctr_update is called independently from | ||
644 | * drbg_ctr_generate. Therefore, the scratchpad is reused. | ||
645 | */ | ||
646 | /* Generate function of CTR DRBG as defined in 10.2.1.5.2 */ | ||
647 | static int drbg_ctr_generate(struct drbg_state *drbg, | ||
648 | unsigned char *buf, unsigned int buflen, | ||
649 | struct list_head *addtl) | ||
650 | { | ||
651 | int len = 0; | ||
652 | int ret = 0; | ||
653 | struct drbg_string data; | ||
654 | unsigned char prefix = DRBG_PREFIX1; | ||
655 | |||
656 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); | ||
657 | |||
658 | /* 10.2.1.5.2 step 2 */ | ||
659 | if (addtl && !list_empty(addtl)) { | ||
660 | ret = drbg_ctr_update(drbg, addtl, 2); | ||
661 | if (ret) | ||
662 | return ret; | ||
663 | } | ||
664 | |||
665 | /* 10.2.1.5.2 step 4.1 */ | ||
666 | drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1); | ||
667 | drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg)); | ||
668 | while (len < buflen) { | ||
669 | int outlen = 0; | ||
670 | /* 10.2.1.5.2 step 4.2 */ | ||
671 | ret = drbg_kcapi_sym(drbg, drbg->C, drbg->scratchpad, &data); | ||
672 | if (ret) { | ||
673 | len = ret; | ||
674 | goto out; | ||
675 | } | ||
676 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? | ||
677 | drbg_blocklen(drbg) : (buflen - len); | ||
678 | if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) { | ||
679 | /* 10.2.1.5.2 step 6 */ | ||
680 | drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1); | ||
681 | continue; | ||
682 | } | ||
683 | /* 10.2.1.5.2 step 4.3 */ | ||
684 | memcpy(buf + len, drbg->scratchpad, outlen); | ||
685 | len += outlen; | ||
686 | /* 10.2.1.5.2 step 6 */ | ||
687 | if (len < buflen) | ||
688 | drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1); | ||
689 | } | ||
690 | |||
691 | /* 10.2.1.5.2 step 6 */ | ||
692 | ret = drbg_ctr_update(drbg, NULL, 3); | ||
693 | if (ret) | ||
694 | len = ret; | ||
695 | |||
696 | out: | ||
697 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); | ||
698 | return len; | ||
699 | } | ||
700 | |||
701 | static struct drbg_state_ops drbg_ctr_ops = { | ||
702 | .update = drbg_ctr_update, | ||
703 | .generate = drbg_ctr_generate, | ||
704 | .crypto_init = drbg_init_sym_kernel, | ||
705 | .crypto_fini = drbg_fini_sym_kernel, | ||
706 | }; | ||
707 | #endif /* CONFIG_CRYPTO_DRBG_CTR */ | ||
708 | |||
709 | /****************************************************************** | ||
710 | * HMAC DRBG callback functions | ||
711 | ******************************************************************/ | ||
712 | |||
713 | #if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC) | ||
714 | static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key, | ||
715 | unsigned char *outval, const struct list_head *in); | ||
716 | static int drbg_init_hash_kernel(struct drbg_state *drbg); | ||
717 | static int drbg_fini_hash_kernel(struct drbg_state *drbg); | ||
718 | #endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */ | ||
719 | |||
720 | #ifdef CONFIG_CRYPTO_DRBG_HMAC | ||
721 | #define CRYPTO_DRBG_HMAC_STRING "HMAC " | ||
722 | /* update function of HMAC DRBG as defined in 10.1.2.2 */ | ||
723 | static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed, | ||
724 | int reseed) | ||
725 | { | ||
726 | int ret = -EFAULT; | ||
727 | int i = 0; | ||
728 | struct drbg_string seed1, seed2, vdata; | ||
729 | LIST_HEAD(seedlist); | ||
730 | LIST_HEAD(vdatalist); | ||
731 | |||
732 | if (!reseed) { | ||
733 | /* 10.1.2.3 step 2 */ | ||
734 | memset(drbg->C, 0, drbg_statelen(drbg)); | ||
735 | memset(drbg->V, 1, drbg_statelen(drbg)); | ||
736 | } | ||
737 | |||
738 | drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg)); | ||
739 | list_add_tail(&seed1.list, &seedlist); | ||
740 | /* buffer of seed2 will be filled in the for loop below with one byte */ | ||
741 | drbg_string_fill(&seed2, NULL, 1); | ||
742 | list_add_tail(&seed2.list, &seedlist); | ||
743 | /* input data of seed is allowed to be NULL at this point */ | ||
744 | if (seed) | ||
745 | list_splice_tail(seed, &seedlist); | ||
746 | |||
747 | drbg_string_fill(&vdata, drbg->V, drbg_statelen(drbg)); | ||
748 | list_add_tail(&vdata.list, &vdatalist); | ||
749 | for (i = 2; 0 < i; i--) { | ||
750 | /* first round uses 0x0, second 0x1 */ | ||
751 | unsigned char prefix = DRBG_PREFIX0; | ||
752 | if (1 == i) | ||
753 | prefix = DRBG_PREFIX1; | ||
754 | /* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */ | ||
755 | seed2.buf = &prefix; | ||
756 | ret = drbg_kcapi_hash(drbg, drbg->C, drbg->C, &seedlist); | ||
757 | if (ret) | ||
758 | return ret; | ||
759 | |||
760 | /* 10.1.2.2 step 2 and 5 -- HMAC for V */ | ||
761 | ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &vdatalist); | ||
762 | if (ret) | ||
763 | return ret; | ||
764 | |||
765 | /* 10.1.2.2 step 3 */ | ||
766 | if (!seed) | ||
767 | return ret; | ||
768 | } | ||
769 | |||
770 | return 0; | ||
771 | } | ||
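
/*
 * In short (sketch of 10.1.2.2): for prefix = 0x00 and then 0x01,
 *	C = HMAC(C, V || prefix || seed); V = HMAC(C, V)
 * where the second round is skipped when no seed material is provided.
 */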
772 | |||
773 | /* generate function of HMAC DRBG as defined in 10.1.2.5 */ | ||
774 | static int drbg_hmac_generate(struct drbg_state *drbg, | ||
775 | unsigned char *buf, | ||
776 | unsigned int buflen, | ||
777 | struct list_head *addtl) | ||
778 | { | ||
779 | int len = 0; | ||
780 | int ret = 0; | ||
781 | struct drbg_string data; | ||
782 | LIST_HEAD(datalist); | ||
783 | |||
784 | /* 10.1.2.5 step 2 */ | ||
785 | if (addtl && !list_empty(addtl)) { | ||
786 | ret = drbg_hmac_update(drbg, addtl, 1); | ||
787 | if (ret) | ||
788 | return ret; | ||
789 | } | ||
790 | |||
791 | drbg_string_fill(&data, drbg->V, drbg_statelen(drbg)); | ||
792 | list_add_tail(&data.list, &datalist); | ||
793 | while (len < buflen) { | ||
794 | unsigned int outlen = 0; | ||
795 | /* 10.1.2.5 step 4.1 */ | ||
796 | ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &datalist); | ||
797 | if (ret) | ||
798 | return ret; | ||
799 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? | ||
800 | drbg_blocklen(drbg) : (buflen - len); | ||
801 | if (!drbg_fips_continuous_test(drbg, drbg->V)) | ||
802 | continue; | ||
803 | |||
804 | /* 10.1.2.5 step 4.2 */ | ||
805 | memcpy(buf + len, drbg->V, outlen); | ||
806 | len += outlen; | ||
807 | } | ||
808 | |||
809 | /* 10.1.2.5 step 6 */ | ||
810 | if (addtl && !list_empty(addtl)) | ||
811 | ret = drbg_hmac_update(drbg, addtl, 1); | ||
812 | else | ||
813 | ret = drbg_hmac_update(drbg, NULL, 1); | ||
814 | if (ret) | ||
815 | return ret; | ||
816 | |||
817 | return len; | ||
818 | } | ||
819 | |||
820 | static struct drbg_state_ops drbg_hmac_ops = { | ||
821 | .update = drbg_hmac_update, | ||
822 | .generate = drbg_hmac_generate, | ||
823 | .crypto_init = drbg_init_hash_kernel, | ||
824 | .crypto_fini = drbg_fini_hash_kernel, | ||
825 | |||
826 | }; | ||
827 | #endif /* CONFIG_CRYPTO_DRBG_HMAC */ | ||
828 | |||
829 | /****************************************************************** | ||
830 | * Hash DRBG callback functions | ||
831 | ******************************************************************/ | ||
832 | |||
833 | #ifdef CONFIG_CRYPTO_DRBG_HASH | ||
834 | #define CRYPTO_DRBG_HASH_STRING "HASH " | ||
835 | /* | ||
836 | * scratchpad usage: as drbg_hash_update and drbg_hash_df are used | ||
837 | * interlinked, the scratchpad is used as follows: | ||
838 | * drbg_hash_update | ||
839 | * start: drbg->scratchpad | ||
840 | * length: drbg_statelen(drbg) | ||
841 | * drbg_hash_df: | ||
842 | * start: drbg->scratchpad + drbg_statelen(drbg) | ||
843 | * length: drbg_blocklen(drbg) | ||
844 | * | ||
845 | * drbg_hash_process_addtl uses the scratchpad, but fully completes | ||
846 | * before either of the functions mentioned before are invoked. Therefore, | ||
847 | * drbg_hash_process_addtl does not need to be specifically considered. | ||
848 | */ | ||
849 | |||
850 | /* Derivation Function for Hash DRBG as defined in 10.4.1 */ | ||
851 | static int drbg_hash_df(struct drbg_state *drbg, | ||
852 | unsigned char *outval, size_t outlen, | ||
853 | struct list_head *entropylist) | ||
854 | { | ||
855 | int ret = 0; | ||
856 | size_t len = 0; | ||
857 | unsigned char input[5]; | ||
858 | unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg); | ||
859 | struct drbg_string data; | ||
860 | |||
861 | memset(tmp, 0, drbg_blocklen(drbg)); | ||
862 | |||
863 | /* 10.4.1 step 3 */ | ||
864 | input[0] = 1; | ||
865 | drbg_int2byte(&input[1], (outlen * 8), 4); | ||
866 | |||
867 | /* 10.4.1 step 4.1 -- concatenation of data for input into hash */ | ||
868 | drbg_string_fill(&data, input, 5); | ||
869 | list_add(&data.list, entropylist); | ||
870 | |||
871 | /* 10.4.1 step 4 */ | ||
872 | while (len < outlen) { | ||
873 | short blocklen = 0; | ||
874 | /* 10.4.1 step 4.1 */ | ||
875 | ret = drbg_kcapi_hash(drbg, NULL, tmp, entropylist); | ||
876 | if (ret) | ||
877 | goto out; | ||
878 | /* 10.4.1 step 4.2 */ | ||
879 | input[0]++; | ||
880 | blocklen = (drbg_blocklen(drbg) < (outlen - len)) ? | ||
881 | drbg_blocklen(drbg) : (outlen - len); | ||
882 | memcpy(outval + len, tmp, blocklen); | ||
883 | len += blocklen; | ||
884 | } | ||
885 | |||
886 | out: | ||
887 | memset(tmp, 0, drbg_blocklen(drbg)); | ||
888 | return ret; | ||
889 | } | ||
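
/*
 * In short (sketch of 10.4.1): with a one-byte counter starting at 1,
 *	out = Hash(0x01 || outlen_bits || input) ||
 *	      Hash(0x02 || outlen_bits || input) || ...
 * truncated to the requested outlen bytes.
 */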
890 | |||
891 | /* update function for Hash DRBG as defined in 10.1.1.2 / 10.1.1.3 */ | ||
892 | static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed, | ||
893 | int reseed) | ||
894 | { | ||
895 | int ret = 0; | ||
896 | struct drbg_string data1, data2; | ||
897 | LIST_HEAD(datalist); | ||
898 | LIST_HEAD(datalist2); | ||
899 | unsigned char *V = drbg->scratchpad; | ||
900 | unsigned char prefix = DRBG_PREFIX1; | ||
901 | |||
902 | memset(drbg->scratchpad, 0, drbg_statelen(drbg)); | ||
903 | if (!seed) | ||
904 | return -EINVAL; | ||
905 | |||
906 | if (reseed) { | ||
907 | /* 10.1.1.3 step 1 */ | ||
908 | memcpy(V, drbg->V, drbg_statelen(drbg)); | ||
909 | drbg_string_fill(&data1, &prefix, 1); | ||
910 | list_add_tail(&data1.list, &datalist); | ||
911 | drbg_string_fill(&data2, V, drbg_statelen(drbg)); | ||
912 | list_add_tail(&data2.list, &datalist); | ||
913 | } | ||
914 | list_splice_tail(seed, &datalist); | ||
915 | |||
916 | /* 10.1.1.2 / 10.1.1.3 step 2 and 3 */ | ||
917 | ret = drbg_hash_df(drbg, drbg->V, drbg_statelen(drbg), &datalist); | ||
918 | if (ret) | ||
919 | goto out; | ||
920 | |||
921 | /* 10.1.1.2 / 10.1.1.3 step 4 */ | ||
922 | prefix = DRBG_PREFIX0; | ||
923 | drbg_string_fill(&data1, &prefix, 1); | ||
924 | list_add_tail(&data1.list, &datalist2); | ||
925 | drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg)); | ||
926 | list_add_tail(&data2.list, &datalist2); | ||
927 | /* 10.1.1.2 / 10.1.1.3 step 4 */ | ||
928 | ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2); | ||
929 | |||
930 | out: | ||
931 | memset(drbg->scratchpad, 0, drbg_statelen(drbg)); | ||
932 | return ret; | ||
933 | } | ||
934 | |||
935 | /* processing of additional information string for Hash DRBG */ | ||
936 | static int drbg_hash_process_addtl(struct drbg_state *drbg, | ||
937 | struct list_head *addtl) | ||
938 | { | ||
939 | int ret = 0; | ||
940 | struct drbg_string data1, data2; | ||
941 | LIST_HEAD(datalist); | ||
942 | unsigned char prefix = DRBG_PREFIX2; | ||
943 | |||
944 | /* this is value w as per documentation */ | ||
945 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); | ||
946 | |||
947 | /* 10.1.1.4 step 2 */ | ||
948 | if (!addtl || list_empty(addtl)) | ||
949 | return 0; | ||
950 | |||
951 | /* 10.1.1.4 step 2a */ | ||
952 | drbg_string_fill(&data1, &prefix, 1); | ||
953 | drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg)); | ||
954 | list_add_tail(&data1.list, &datalist); | ||
955 | list_add_tail(&data2.list, &datalist); | ||
956 | list_splice_tail(addtl, &datalist); | ||
957 | ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist); | ||
958 | if (ret) | ||
959 | goto out; | ||
960 | |||
961 | /* 10.1.1.4 step 2b */ | ||
962 | drbg_add_buf(drbg->V, drbg_statelen(drbg), | ||
963 | drbg->scratchpad, drbg_blocklen(drbg)); | ||
964 | |||
965 | out: | ||
966 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); | ||
967 | return ret; | ||
968 | } | ||
969 | |||
970 | /* Hashgen defined in 10.1.1.4 */ | ||
971 | static int drbg_hash_hashgen(struct drbg_state *drbg, | ||
972 | unsigned char *buf, | ||
973 | unsigned int buflen) | ||
974 | { | ||
975 | int len = 0; | ||
976 | int ret = 0; | ||
977 | unsigned char *src = drbg->scratchpad; | ||
978 | unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg); | ||
979 | struct drbg_string data; | ||
980 | LIST_HEAD(datalist); | ||
981 | unsigned char prefix = DRBG_PREFIX1; | ||
982 | |||
983 | memset(src, 0, drbg_statelen(drbg)); | ||
984 | memset(dst, 0, drbg_blocklen(drbg)); | ||
985 | |||
986 | /* 10.1.1.4 step hashgen 2 */ | ||
987 | memcpy(src, drbg->V, drbg_statelen(drbg)); | ||
988 | |||
989 | drbg_string_fill(&data, src, drbg_statelen(drbg)); | ||
990 | list_add_tail(&data.list, &datalist); | ||
991 | while (len < buflen) { | ||
992 | unsigned int outlen = 0; | ||
993 | /* 10.1.1.4 step hashgen 4.1 */ | ||
994 | ret = drbg_kcapi_hash(drbg, NULL, dst, &datalist); | ||
995 | if (ret) { | ||
996 | len = ret; | ||
997 | goto out; | ||
998 | } | ||
999 | outlen = (drbg_blocklen(drbg) < (buflen - len)) ? | ||
1000 | drbg_blocklen(drbg) : (buflen - len); | ||
1001 | if (!drbg_fips_continuous_test(drbg, dst)) { | ||
1002 | drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1); | ||
1003 | continue; | ||
1004 | } | ||
1005 | /* 10.1.1.4 step hashgen 4.2 */ | ||
1006 | memcpy(buf + len, dst, outlen); | ||
1007 | len += outlen; | ||
1008 | /* 10.1.1.4 hashgen step 4.3 */ | ||
1009 | if (len < buflen) | ||
1010 | drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1); | ||
1011 | } | ||
1012 | |||
1013 | out: | ||
1014 | memset(drbg->scratchpad, 0, | ||
1015 | (drbg_statelen(drbg) + drbg_blocklen(drbg))); | ||
1016 | return len; | ||
1017 | } | ||
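
/*
 * In short (sketch of the hashgen process): with data initialized to V,
 *	out = Hash(data) || Hash(data + 1) || Hash(data + 2) || ...
 * truncated to the requested buflen bytes.
 */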
1018 | |||
1019 | /* generate function for Hash DRBG as defined in 10.1.1.4 */ | ||
1020 | static int drbg_hash_generate(struct drbg_state *drbg, | ||
1021 | unsigned char *buf, unsigned int buflen, | ||
1022 | struct list_head *addtl) | ||
1023 | { | ||
1024 | int len = 0; | ||
1025 | int ret = 0; | ||
1026 | unsigned char req[8]; | ||
1027 | unsigned char prefix = DRBG_PREFIX3; | ||
1028 | struct drbg_string data1, data2; | ||
1029 | LIST_HEAD(datalist); | ||
1030 | |||
1031 | /* 10.1.1.4 step 2 */ | ||
1032 | ret = drbg_hash_process_addtl(drbg, addtl); | ||
1033 | if (ret) | ||
1034 | return ret; | ||
1035 | /* 10.1.1.4 step 3 */ | ||
1036 | len = drbg_hash_hashgen(drbg, buf, buflen); | ||
1037 | |||
1038 | /* this is the value H as documented in 10.1.1.4 */ | ||
1039 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); | ||
1040 | /* 10.1.1.4 step 4 */ | ||
1041 | drbg_string_fill(&data1, &prefix, 1); | ||
1042 | list_add_tail(&data1.list, &datalist); | ||
1043 | drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg)); | ||
1044 | list_add_tail(&data2.list, &datalist); | ||
1045 | ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist); | ||
1046 | if (ret) { | ||
1047 | len = ret; | ||
1048 | goto out; | ||
1049 | } | ||
1050 | |||
1051 | /* 10.1.1.4 step 5 */ | ||
1052 | drbg_add_buf(drbg->V, drbg_statelen(drbg), | ||
1053 | drbg->scratchpad, drbg_blocklen(drbg)); | ||
1054 | drbg_add_buf(drbg->V, drbg_statelen(drbg), | ||
1055 | drbg->C, drbg_statelen(drbg)); | ||
1056 | drbg_int2byte(req, drbg->reseed_ctr, sizeof(req)); | ||
1057 | drbg_add_buf(drbg->V, drbg_statelen(drbg), req, 8); | ||
1058 | |||
1059 | out: | ||
1060 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); | ||
1061 | return len; | ||
1062 | } | ||
1063 | |||
1064 | /* | ||
1065 | * scratchpad usage: as update and generate are used in isolation, | ||
1066 | * both can use the scratchpad | ||
1067 | */ | ||
1068 | static struct drbg_state_ops drbg_hash_ops = { | ||
1069 | .update = drbg_hash_update, | ||
1070 | .generate = drbg_hash_generate, | ||
1071 | .crypto_init = drbg_init_hash_kernel, | ||
1072 | .crypto_fini = drbg_fini_hash_kernel, | ||
1073 | }; | ||
1074 | #endif /* CONFIG_CRYPTO_DRBG_HASH */ | ||
1075 | |||
1076 | /****************************************************************** | ||
1077 | * Functions common for DRBG implementations | ||
1078 | ******************************************************************/ | ||
1079 | |||
1080 | /* | ||
1081 | * Seeding or reseeding of the DRBG | ||
1082 | * | ||
1083 | * @drbg: DRBG state struct | ||
1084 | * @pers: personalization / additional information buffer | ||
1085 | * @reseed: 0 for initial seed process, 1 for reseeding | ||
1086 | * | ||
1087 | * return: | ||
1088 | * 0 on success | ||
1089 | * error value otherwise | ||
1090 | */ | ||
1091 | static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | ||
1092 | bool reseed) | ||
1093 | { | ||
1094 | int ret = 0; | ||
1095 | unsigned char *entropy = NULL; | ||
1096 | size_t entropylen = 0; | ||
1097 | struct drbg_string data1; | ||
1098 | LIST_HEAD(seedlist); | ||
1099 | |||
1100 | /* 9.1 / 9.2 / 9.3.1 step 3 */ | ||
1101 | if (pers && pers->len > (drbg_max_addtl(drbg))) { | ||
1102 | pr_devel("DRBG: personalization string too long %zu\n", | ||
1103 | pers->len); | ||
1104 | return -EINVAL; | ||
1105 | } | ||
1106 | |||
1107 | if (drbg->test_data && drbg->test_data->testentropy) { | ||
1108 | drbg_string_fill(&data1, drbg->test_data->testentropy->buf, | ||
1109 | drbg->test_data->testentropy->len); | ||
1110 | pr_devel("DRBG: using test entropy\n"); | ||
1111 | } else { | ||
1112 | /* | ||
1113 | * Gather entropy equal to the security strength of the DRBG. | ||
1114 | * With a derivation function, a nonce is required in addition | ||
1115 | * to the entropy. A nonce must be at least 1/2 of the security | ||
1116 | * strength of the DRBG in size. Thus, entropy + nonce is 3/2 | ||
1117 | * of the strength. The consideration of a nonce is only | ||
1118 | * applicable during initial seeding. | ||
1119 | */ | ||
1120 | entropylen = drbg_sec_strength(drbg->core->flags); | ||
1121 | if (!entropylen) | ||
1122 | return -EFAULT; | ||
1123 | if (!reseed) | ||
1124 | entropylen = ((entropylen + 1) / 2) * 3; | ||
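/*
 * e.g. a strength of 32 bytes yields ((32 + 1) / 2) * 3 = 48 bytes,
 * i.e. 32 bytes of entropy plus a 16 byte nonce
 */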
1125 | pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n", | ||
1126 | entropylen); | ||
1127 | entropy = kzalloc(entropylen, GFP_KERNEL); | ||
1128 | if (!entropy) | ||
1129 | return -ENOMEM; | ||
1130 | get_random_bytes(entropy, entropylen); | ||
1131 | drbg_string_fill(&data1, entropy, entropylen); | ||
1132 | } | ||
1133 | list_add_tail(&data1.list, &seedlist); | ||
1134 | |||
1135 | /* | ||
1136 | * concatenation of entropy with personalization string / addtl input; | ||
1137 | * the variable pers is handed in directly by the caller, so check | ||
1138 | * whether its contents are appropriate | ||
1139 | */ | ||
1140 | if (pers && pers->buf && 0 < pers->len) { | ||
1141 | list_add_tail(&pers->list, &seedlist); | ||
1142 | pr_devel("DRBG: using personalization string\n"); | ||
1143 | } | ||
1144 | |||
1145 | ret = drbg->d_ops->update(drbg, &seedlist, reseed); | ||
1146 | if (ret) | ||
1147 | goto out; | ||
1148 | |||
1149 | drbg->seeded = true; | ||
1150 | /* 10.1.1.2 / 10.1.1.3 step 5 */ | ||
1151 | drbg->reseed_ctr = 1; | ||
1152 | |||
1153 | out: | ||
1154 | if (entropy) | ||
1155 | kzfree(entropy); | ||
1156 | return ret; | ||
1157 | } | ||
1158 | |||
1159 | /* Free all substructures in a DRBG state without the DRBG state structure */ | ||
1160 | static inline void drbg_dealloc_state(struct drbg_state *drbg) | ||
1161 | { | ||
1162 | if (!drbg) | ||
1163 | return; | ||
1164 | if (drbg->V) | ||
1165 | kzfree(drbg->V); | ||
1166 | drbg->V = NULL; | ||
1167 | if (drbg->C) | ||
1168 | kzfree(drbg->C); | ||
1169 | drbg->C = NULL; | ||
1170 | if (drbg->scratchpad) | ||
1171 | kzfree(drbg->scratchpad); | ||
1172 | drbg->scratchpad = NULL; | ||
1173 | drbg->reseed_ctr = 0; | ||
1174 | #ifdef CONFIG_CRYPTO_FIPS | ||
1175 | if (drbg->prev) | ||
1176 | kzfree(drbg->prev); | ||
1177 | drbg->prev = NULL; | ||
1178 | drbg->fips_primed = false; | ||
1179 | #endif | ||
1180 | } | ||
1181 | |||
1182 | /* | ||
1183 | * Allocate all sub-structures for a DRBG state. | ||
1184 | * The DRBG state structure must already be allocated. | ||
1185 | */ | ||
1186 | static inline int drbg_alloc_state(struct drbg_state *drbg) | ||
1187 | { | ||
1188 | int ret = -ENOMEM; | ||
1189 | unsigned int sb_size = 0; | ||
1190 | |||
1191 | if (!drbg) | ||
1192 | return -EINVAL; | ||
1193 | |||
1194 | drbg->V = kzalloc(drbg_statelen(drbg), GFP_KERNEL); | ||
1195 | if (!drbg->V) | ||
1196 | goto err; | ||
1197 | drbg->C = kzalloc(drbg_statelen(drbg), GFP_KERNEL); | ||
1198 | if (!drbg->C) | ||
1199 | goto err; | ||
1200 | #ifdef CONFIG_CRYPTO_FIPS | ||
1201 | drbg->prev = kzalloc(drbg_blocklen(drbg), GFP_KERNEL); | ||
1202 | if (!drbg->prev) | ||
1203 | goto err; | ||
1204 | drbg->fips_primed = false; | ||
1205 | #endif | ||
1206 | /* scratchpad is only generated for CTR and Hash */ | ||
1207 | if (drbg->core->flags & DRBG_HMAC) | ||
1208 | sb_size = 0; | ||
1209 | else if (drbg->core->flags & DRBG_CTR) | ||
1210 | sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */ | ||
1211 | drbg_statelen(drbg) + /* df_data */ | ||
1212 | drbg_blocklen(drbg) + /* pad */ | ||
1213 | drbg_blocklen(drbg) + /* iv */ | ||
1214 | drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */ | ||
1215 | else | ||
1216 | sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg); | ||
1217 | |||
1218 | if (0 < sb_size) { | ||
1219 | drbg->scratchpad = kzalloc(sb_size, GFP_KERNEL); | ||
1220 | if (!drbg->scratchpad) | ||
1221 | goto err; | ||
1222 | } | ||
1223 | spin_lock_init(&drbg->drbg_lock); | ||
1224 | return 0; | ||
1225 | |||
1226 | err: | ||
1227 | drbg_dealloc_state(drbg); | ||
1228 | return ret; | ||
1229 | } | ||
1230 | |||
1231 | /* | ||
1232 | * Strategy to avoid holding long term locks: generate a shadow copy of DRBG | ||
1233 | * and perform all operations on this shadow copy. After finishing, restore | ||
1234 | * the updated state of the shadow copy into original drbg state. This way, | ||
1235 | * only the read and write operations of the original drbg state must be | ||
1236 | * locked | ||
1237 | */ | ||
1238 | static inline void drbg_copy_drbg(struct drbg_state *src, | ||
1239 | struct drbg_state *dst) | ||
1240 | { | ||
1241 | if (!src || !dst) | ||
1242 | return; | ||
1243 | memcpy(dst->V, src->V, drbg_statelen(src)); | ||
1244 | memcpy(dst->C, src->C, drbg_statelen(src)); | ||
1245 | dst->reseed_ctr = src->reseed_ctr; | ||
1246 | dst->seeded = src->seeded; | ||
1247 | dst->pr = src->pr; | ||
1248 | #ifdef CONFIG_CRYPTO_FIPS | ||
1249 | dst->fips_primed = src->fips_primed; | ||
1250 | memcpy(dst->prev, src->prev, drbg_blocklen(src)); | ||
1251 | #endif | ||
1252 | /* | ||
1253 | * Not copied: | ||
1254 | * scratchpad is initialized in drbg_alloc_state; | ||
1255 | * priv_data is initialized with call to crypto_init; | ||
1256 | * d_ops and core are set outside, as these parameters are const; | ||
1257 | * test_data is set outside to prevent it being copied back. | ||
1258 | */ | ||
1259 | } | ||
1260 | |||
1261 | static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow) | ||
1262 | { | ||
1263 | int ret = -ENOMEM; | ||
1264 | struct drbg_state *tmp = NULL; | ||
1265 | |||
1266 | if (!drbg || !drbg->core || !drbg->V || !drbg->C) { | ||
1267 | pr_devel("DRBG: attempt to generate shadow copy for " | ||
1268 | "uninitialized DRBG state rejected\n"); | ||
1269 | return -EINVAL; | ||
1270 | } | ||
1271 | /* HMAC does not have a scratchpad */ | ||
1272 | if (!(drbg->core->flags & DRBG_HMAC) && NULL == drbg->scratchpad) | ||
1273 | return -EINVAL; | ||
1274 | |||
1275 | tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); | ||
1276 | if (!tmp) | ||
1277 | return -ENOMEM; | ||
1278 | |||
1279 | /* read-only data as they are defined as const, no lock needed */ | ||
1280 | tmp->core = drbg->core; | ||
1281 | tmp->d_ops = drbg->d_ops; | ||
1282 | |||
1283 | ret = drbg_alloc_state(tmp); | ||
1284 | if (ret) | ||
1285 | goto err; | ||
1286 | |||
1287 | spin_lock_bh(&drbg->drbg_lock); | ||
1288 | drbg_copy_drbg(drbg, tmp); | ||
1289 | /* only make a link to the test buffer, as we only read that data */ | ||
1290 | tmp->test_data = drbg->test_data; | ||
1291 | spin_unlock_bh(&drbg->drbg_lock); | ||
1292 | *shadow = tmp; | ||
1293 | return 0; | ||
1294 | |||
1295 | err: | ||
1296 | if (tmp) | ||
1297 | kzfree(tmp); | ||
1298 | return ret; | ||
1299 | } | ||
1300 | |||
1301 | static void drbg_restore_shadow(struct drbg_state *drbg, | ||
1302 | struct drbg_state **shadow) | ||
1303 | { | ||
1304 | struct drbg_state *tmp = *shadow; | ||
1305 | |||
1306 | spin_lock_bh(&drbg->drbg_lock); | ||
1307 | drbg_copy_drbg(tmp, drbg); | ||
1308 | spin_unlock_bh(&drbg->drbg_lock); | ||
1309 | drbg_dealloc_state(tmp); | ||
1310 | kzfree(tmp); | ||
1311 | *shadow = NULL; | ||
1312 | } | ||
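
/*
 * Typical call pattern (sketch, mirroring drbg_generate below):
 *	struct drbg_state *shadow = NULL;
 *	int ret = drbg_make_shadow(drbg, &shadow);
 *	if (ret)
 *		return ret;
 *	// ... operate on shadow without holding drbg->drbg_lock ...
 *	drbg_restore_shadow(drbg, &shadow);
 */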
1313 | |||
1314 | /************************************************************************* | ||
1315 | * DRBG interface functions | ||
1316 | *************************************************************************/ | ||
1317 | |||
1318 | /* | ||
1319 | * DRBG generate function as required by SP800-90A - this function | ||
1320 | * generates random numbers | ||
1321 | * | ||
1322 | * @drbg DRBG state handle | ||
1323 | * @buf Buffer where to store the random numbers -- the buffer must already | ||
1324 | * be pre-allocated by caller | ||
1325 | * @buflen Length of output buffer - this value defines the number of random | ||
1326 | * bytes pulled from DRBG | ||
1327 | * @addtl Additional input that is mixed into state, may be NULL -- note | ||
1328 | * the entropy is pulled by the DRBG internally unconditionally | ||
1329 | * as defined in SP800-90A. The additional input is mixed into | ||
1330 | * the state in addition to the pulled entropy. | ||
1331 | * | ||
1332 | * return: generated number of bytes | ||
1333 | */ | ||
1334 | static int drbg_generate(struct drbg_state *drbg, | ||
1335 | unsigned char *buf, unsigned int buflen, | ||
1336 | struct drbg_string *addtl) | ||
1337 | { | ||
1338 | int len = 0; | ||
1339 | struct drbg_state *shadow = NULL; | ||
1340 | LIST_HEAD(addtllist); | ||
1341 | struct drbg_string timestamp; | ||
1342 | union { | ||
1343 | cycles_t cycles; | ||
1344 | unsigned char char_cycles[sizeof(cycles_t)]; | ||
1345 | } now; | ||
1346 | |||
1347 | if (0 == buflen || !buf) { | ||
1348 | pr_devel("DRBG: no output buffer provided\n"); | ||
1349 | return -EINVAL; | ||
1350 | } | ||
1351 | if (addtl && NULL == addtl->buf && 0 < addtl->len) { | ||
1352 | pr_devel("DRBG: wrong format of additional information\n"); | ||
1353 | return -EINVAL; | ||
1354 | } | ||
1355 | |||
1356 | len = drbg_make_shadow(drbg, &shadow); | ||
1357 | if (len) { | ||
1358 | pr_devel("DRBG: shadow copy cannot be generated\n"); | ||
1359 | return len; | ||
1360 | } | ||
1361 | |||
1362 | /* 9.3.1 step 2 */ | ||
1363 | len = -EINVAL; | ||
1364 | if (buflen > (drbg_max_request_bytes(shadow))) { | ||
1365 | pr_devel("DRBG: requested random numbers too large %u\n", | ||
1366 | buflen); | ||
1367 | goto err; | ||
1368 | } | ||
1369 | |||
1370 | /* 9.3.1 step 3 is implicit with the chosen DRBG */ | ||
1371 | |||
1372 | /* 9.3.1 step 4 */ | ||
1373 | if (addtl && addtl->len > (drbg_max_addtl(shadow))) { | ||
1374 | pr_devel("DRBG: additional information string too long %zu\n", | ||
1375 | addtl->len); | ||
1376 | goto err; | ||
1377 | } | ||
1378 | /* 9.3.1 step 5 is implicit with the chosen DRBG */ | ||
1379 | |||
1380 | /* | ||
1381 | * 9.3.1 steps 6 and 9 supplemented by 9.3.2 step c are implemented | ||
1382 | * here. The spec is a bit convoluted here, so we make it simpler. | ||
1383 | */ | ||
1384 | if ((drbg_max_requests(shadow)) < shadow->reseed_ctr) | ||
1385 | shadow->seeded = false; | ||
1386 | |||
1387 | /* allocate cipher handle */ | ||
1388 | if (shadow->d_ops->crypto_init) { | ||
1389 | len = shadow->d_ops->crypto_init(shadow); | ||
1390 | if (len) | ||
1391 | goto err; | ||
1392 | } | ||
1393 | |||
1394 | if (shadow->pr || !shadow->seeded) { | ||
1395 | pr_devel("DRBG: reseeding before generation (prediction " | ||
1396 | "resistance: %s, state %s)\n", | ||
1397 | drbg->pr ? "true" : "false", | ||
1398 | drbg->seeded ? "seeded" : "unseeded"); | ||
1399 | /* 9.3.1 steps 7.1 through 7.3 */ | ||
1400 | len = drbg_seed(shadow, addtl, true); | ||
1401 | if (len) | ||
1402 | goto err; | ||
1403 | /* 9.3.1 step 7.4 */ | ||
1404 | addtl = NULL; | ||
1405 | } | ||
1406 | |||
1407 | /* | ||
1408 | * Mix the time stamp into the DRBG state if the DRBG is not in | ||
1409 | * test mode. If there are two callers invoking the DRBG at the same | ||
1410 | * time, i.e. before the first caller merges its shadow state back, | ||
1411 | * both callers would obtain the same random number stream without | ||
1412 | * changing the state here. | ||
1413 | */ | ||
1414 | if (!drbg->test_data) { | ||
1415 | now.cycles = random_get_entropy(); | ||
1416 | drbg_string_fill(×tamp, now.char_cycles, sizeof(cycles_t)); | ||
1417 | list_add_tail(×tamp.list, &addtllist); | ||
1418 | } | ||
1419 | if (addtl && 0 < addtl->len) | ||
1420 | list_add_tail(&addtl->list, &addtllist); | ||
1421 | /* 9.3.1 step 8 and 10 */ | ||
1422 | len = shadow->d_ops->generate(shadow, buf, buflen, &addtllist); | ||
1423 | |||
1424 | /* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */ | ||
1425 | shadow->reseed_ctr++; | ||
1426 | if (0 >= len) | ||
1427 | goto err; | ||
1428 | |||
1429 | /* | ||
1430 | * Section 11.3.3 requires the self tests to be re-performed after some | ||
1431 | * amount of generated random numbers. The chosen value after which the | ||
1432 | * self test is performed is arbitrary, but it should be reasonable. | ||
1433 | * However, we do not perform the self tests for the following | ||
1434 | * reason: it is mathematically impossible that the initial self tests | ||
1435 | * were successful and the following ones are not. If the initial tests | ||
1436 | * passed and the following did not, the kernel integrity is violated. | ||
1437 | * In this case, the entire kernel operation is questionable and it | ||
1438 | * is unlikely that the integrity violation only affects the | ||
1439 | * correct operation of the DRBG. | ||
1440 | * | ||
1441 | * Albeit the following code is commented out, it is provided in | ||
1442 | * case somebody has a need to implement the test of 11.3.3. | ||
1443 | */ | ||
1444 | #if 0 | ||
1445 | if (shadow->reseed_ctr && !(shadow->reseed_ctr % 4096)) { | ||
1446 | int err = 0; | ||
1447 | pr_devel("DRBG: start to perform self test\n"); | ||
1448 | if (drbg->core->flags & DRBG_HMAC) | ||
1449 | err = alg_test("drbg_pr_hmac_sha256", | ||
1450 | "drbg_pr_hmac_sha256", 0, 0); | ||
1451 | else if (drbg->core->flags & DRBG_CTR) | ||
1452 | err = alg_test("drbg_pr_ctr_aes128", | ||
1453 | "drbg_pr_ctr_aes128", 0, 0); | ||
1454 | else | ||
1455 | err = alg_test("drbg_pr_sha256", | ||
1456 | "drbg_pr_sha256", 0, 0); | ||
1457 | if (err) { | ||
1458 | pr_err("DRBG: periodical self test failed\n"); | ||
1459 | /* | ||
1460 | * uninstantiate implies that from now on, only errors | ||
1461 | * are returned when reusing this DRBG cipher handle | ||
1462 | */ | ||
1463 | drbg_uninstantiate(drbg); | ||
1464 | drbg_dealloc_state(shadow); | ||
1465 | kzfree(shadow); | ||
1466 | return 0; | ||
1467 | } else { | ||
1468 | pr_devel("DRBG: self test successful\n"); | ||
1469 | } | ||
1470 | } | ||
1471 | #endif | ||
1472 | |||
1473 | err: | ||
1474 | if (shadow->d_ops->crypto_fini) | ||
1475 | shadow->d_ops->crypto_fini(shadow); | ||
1476 | drbg_restore_shadow(drbg, &shadow); | ||
1477 | return len; | ||
1478 | } | ||
1479 | |||
1480 | /* | ||
1481 | * Wrapper around drbg_generate which can pull arbitrary long strings | ||
1482 | * from the DRBG without hitting the maximum request limitation. | ||
1483 | * | ||
1484 | * Parameters: see drbg_generate | ||
1485 | * Return codes: see drbg_generate -- if one drbg_generate request fails, | ||
1486 | * the entire drbg_generate_long request fails | ||
1487 | */ | ||
1488 | static int drbg_generate_long(struct drbg_state *drbg, | ||
1489 | unsigned char *buf, unsigned int buflen, | ||
1490 | struct drbg_string *addtl) | ||
1491 | { | ||
1492 | int len = 0; | ||
1493 | unsigned int slice = 0; | ||
1494 | do { | ||
1495 | int tmplen = 0; | ||
1496 | unsigned int chunk = 0; | ||
1497 | slice = ((buflen - len) / drbg_max_request_bytes(drbg)); | ||
1498 | chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len); | ||
1499 | tmplen = drbg_generate(drbg, buf + len, chunk, addtl); | ||
1500 | if (0 >= tmplen) | ||
1501 | return tmplen; | ||
1502 | len += tmplen; | ||
1503 | } while (slice > 0 && (len < buflen)); | ||
1504 | return len; | ||
1505 | } | ||
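
/*
 * Example (illustrative): if drbg_max_request_bytes() returns M and a
 * caller requests 2 * M + 100 bytes, the loop above issues three
 * drbg_generate calls with chunk sizes M, M and 100.
 */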
1506 | |||
1507 | /* | ||
1508 | * DRBG instantiation function as required by SP800-90A - this function | ||
1509 | * sets up the DRBG handle, performs the initial seeding and all sanity | ||
1510 | * checks required by SP800-90A | ||
1511 | * | ||
1512 | * @drbg memory of state -- if NULL, new memory is allocated | ||
1513 | * @pers Personalization string that is mixed into state, may be NULL -- note | ||
1514 | * the entropy is pulled by the DRBG internally unconditionally | ||
1515 | * as defined in SP800-90A. The additional input is mixed into | ||
1516 | * the state in addition to the pulled entropy. | ||
1517 | * @coreref reference to core | ||
1518 | * @pr prediction resistance enabled | ||
1519 | * | ||
1520 | * return | ||
1521 | * 0 on success | ||
1522 | * error value otherwise | ||
1523 | */ | ||
1524 | static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, | ||
1525 | int coreref, bool pr) | ||
1526 | { | ||
1527 | int ret = -ENOMEM; | ||
1528 | |||
1529 | pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " | ||
1530 | "%s\n", coreref, pr ? "enabled" : "disabled"); | ||
1531 | drbg->core = &drbg_cores[coreref]; | ||
1532 | drbg->pr = pr; | ||
1533 | drbg->seeded = false; | ||
1534 | switch (drbg->core->flags & DRBG_TYPE_MASK) { | ||
1535 | #ifdef CONFIG_CRYPTO_DRBG_HMAC | ||
1536 | case DRBG_HMAC: | ||
1537 | drbg->d_ops = &drbg_hmac_ops; | ||
1538 | break; | ||
1539 | #endif /* CONFIG_CRYPTO_DRBG_HMAC */ | ||
1540 | #ifdef CONFIG_CRYPTO_DRBG_HASH | ||
1541 | case DRBG_HASH: | ||
1542 | drbg->d_ops = &drbg_hash_ops; | ||
1543 | break; | ||
1544 | #endif /* CONFIG_CRYPTO_DRBG_HASH */ | ||
1545 | #ifdef CONFIG_CRYPTO_DRBG_CTR | ||
1546 | case DRBG_CTR: | ||
1547 | drbg->d_ops = &drbg_ctr_ops; | ||
1548 | break; | ||
1549 | #endif /* CONFIG_CRYPTO_DRBG_CTR */ | ||
1550 | default: | ||
1551 | return -EOPNOTSUPP; | ||
1552 | } | ||
1553 | |||
1554 | /* 9.1 step 1 is implicit with the selected DRBG type */ | ||
1555 | |||
1556 | /* | ||
1557 | * 9.1 step 2 is implicit as caller can select prediction resistance | ||
1558 | * and the flag is copied into drbg->flags -- | ||
1559 | * all DRBG types support prediction resistance | ||
1560 | */ | ||
1561 | |||
1562 | /* 9.1 step 4 is implicit in drbg_sec_strength */ | ||
1563 | |||
1564 | ret = drbg_alloc_state(drbg); | ||
1565 | if (ret) | ||
1566 | return ret; | ||
1567 | |||
1568 | ret = -EFAULT; | ||
1569 | if (drbg->d_ops->crypto_init && drbg->d_ops->crypto_init(drbg)) | ||
1570 | goto err; | ||
1571 | ret = drbg_seed(drbg, pers, false); | ||
1572 | if (drbg->d_ops->crypto_fini) | ||
1573 | drbg->d_ops->crypto_fini(drbg); | ||
1574 | if (ret) | ||
1575 | goto err; | ||
1576 | |||
1577 | return 0; | ||
1578 | |||
1579 | err: | ||
1580 | drbg_dealloc_state(drbg); | ||
1581 | return ret; | ||
1582 | } | ||
1583 | |||
1584 | /* | ||
1585 | * DRBG uninstantiate function as required by SP800-90A - this function | ||
1586 | * frees all buffers and the DRBG handle | ||
1587 | * | ||
1588 | * @drbg DRBG state handle | ||
1589 | * | ||
1590 | * return | ||
1591 | * 0 on success | ||
1592 | */ | ||
1593 | static int drbg_uninstantiate(struct drbg_state *drbg) | ||
1594 | { | ||
1595 | spin_lock_bh(&drbg->drbg_lock); | ||
1596 | drbg_dealloc_state(drbg); | ||
1597 | /* no scrubbing of test_data -- this shall survive an uninstantiate */ | ||
1598 | spin_unlock_bh(&drbg->drbg_lock); | ||
1599 | return 0; | ||
1600 | } | ||
1601 | |||
1602 | /* | ||
1603 | * Helper function for setting the test data in the DRBG | ||
1604 | * | ||
1605 | * @drbg DRBG state handle | ||
1606 | * @test_data test data to set | ||
1607 | */ | ||
1608 | static inline void drbg_set_testdata(struct drbg_state *drbg, | ||
1609 | struct drbg_test_data *test_data) | ||
1610 | { | ||
1611 | if (!test_data || !test_data->testentropy) | ||
1612 | return; | ||
1613 | spin_lock_bh(&drbg->drbg_lock); | ||
1614 | drbg->test_data = test_data; | ||
1615 | spin_unlock_bh(&drbg->drbg_lock); | ||
1616 | } | ||
1617 | |||
1618 | /*************************************************************** | ||
1619 | * Kernel crypto API cipher invocations requested by DRBG | ||
1620 | ***************************************************************/ | ||
1621 | |||
1622 | #if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC) | ||
1623 | struct sdesc { | ||
1624 | struct shash_desc shash; | ||
1625 | char ctx[]; | ||
1626 | }; | ||
1627 | |||
1628 | static int drbg_init_hash_kernel(struct drbg_state *drbg) | ||
1629 | { | ||
1630 | struct sdesc *sdesc; | ||
1631 | struct crypto_shash *tfm; | ||
1632 | |||
1633 | tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0); | ||
1634 | if (IS_ERR(tfm)) { | ||
1635 | pr_info("DRBG: could not allocate digest TFM handle\n"); | ||
1636 | return PTR_ERR(tfm); | ||
1637 | } | ||
1638 | BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm)); | ||
1639 | sdesc = kzalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm), | ||
1640 | GFP_KERNEL); | ||
1641 | if (!sdesc) { | ||
1642 | crypto_free_shash(tfm); | ||
1643 | return -ENOMEM; | ||
1644 | } | ||
1645 | |||
1646 | sdesc->shash.tfm = tfm; | ||
1647 | sdesc->shash.flags = 0; | ||
1648 | drbg->priv_data = sdesc; | ||
1649 | return 0; | ||
1650 | } | ||
1651 | |||
1652 | static int drbg_fini_hash_kernel(struct drbg_state *drbg) | ||
1653 | { | ||
1654 | struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; | ||
1655 | if (sdesc) { | ||
1656 | crypto_free_shash(sdesc->shash.tfm); | ||
1657 | kzfree(sdesc); | ||
1658 | } | ||
1659 | drbg->priv_data = NULL; | ||
1660 | return 0; | ||
1661 | } | ||
1662 | |||
1663 | static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key, | ||
1664 | unsigned char *outval, const struct list_head *in) | ||
1665 | { | ||
1666 | struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; | ||
1667 | struct drbg_string *input = NULL; | ||
1668 | |||
1669 | if (key) | ||
1670 | crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg)); | ||
1671 | crypto_shash_init(&sdesc->shash); | ||
1672 | list_for_each_entry(input, in, list) | ||
1673 | crypto_shash_update(&sdesc->shash, input->buf, input->len); | ||
1674 | return crypto_shash_final(&sdesc->shash, outval); | ||
1675 | } | ||
1676 | #endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */ | ||
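The sdesc wrapper above co-allocates the shash_desc and the transform's private context in one buffer, which is the usual idiom for synchronous hashes. As an illustration only (not part of this patch), a one-shot digest using the same idiom might look like the following; the algorithm name "sha256" and the function name are placeholders:

/* Sketch, not from the patch: one-shot digest via the sdesc idiom. */
static int sdesc_digest_sketch(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct sdesc *sdesc;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);	/* placeholder algo */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* descriptor and per-request context live in one allocation */
	sdesc = kzalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!sdesc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	sdesc->shash.tfm = tfm;
	sdesc->shash.flags = 0;

	ret = crypto_shash_init(&sdesc->shash);
	if (!ret)
		ret = crypto_shash_update(&sdesc->shash, data, len);
	if (!ret)
		ret = crypto_shash_final(&sdesc->shash, out);

	kzfree(sdesc);
	crypto_free_shash(tfm);
	return ret;
}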
1677 | |||
1678 | #ifdef CONFIG_CRYPTO_DRBG_CTR | ||
1679 | static int drbg_init_sym_kernel(struct drbg_state *drbg) | ||
1680 | { | ||
1681 | int ret = 0; | ||
1682 | struct crypto_blkcipher *tfm; | ||
1683 | |||
1684 | tfm = crypto_alloc_blkcipher(drbg->core->backend_cra_name, 0, 0); | ||
1685 | if (IS_ERR(tfm)) { | ||
1686 | pr_info("DRBG: could not allocate cipher TFM handle\n"); | ||
1687 | return PTR_ERR(tfm); | ||
1688 | } | ||
1689 | BUG_ON(drbg_blocklen(drbg) != crypto_blkcipher_blocksize(tfm)); | ||
1690 | drbg->priv_data = tfm; | ||
1691 | return ret; | ||
1692 | } | ||
1693 | |||
1694 | static int drbg_fini_sym_kernel(struct drbg_state *drbg) | ||
1695 | { | ||
1696 | struct crypto_blkcipher *tfm = | ||
1697 | (struct crypto_blkcipher *)drbg->priv_data; | ||
1698 | if (tfm) | ||
1699 | crypto_free_blkcipher(tfm); | ||
1700 | drbg->priv_data = NULL; | ||
1701 | return 0; | ||
1702 | } | ||
1703 | |||
1704 | static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, | ||
1705 | unsigned char *outval, const struct drbg_string *in) | ||
1706 | { | ||
1707 | int ret = 0; | ||
1708 | struct scatterlist sg_in, sg_out; | ||
1709 | struct blkcipher_desc desc; | ||
1710 | struct crypto_blkcipher *tfm = | ||
1711 | (struct crypto_blkcipher *)drbg->priv_data; | ||
1712 | |||
1713 | desc.tfm = tfm; | ||
1714 | desc.flags = 0; | ||
1715 | crypto_blkcipher_setkey(tfm, key, (drbg_keylen(drbg))); | ||
1716 | /* there is only one component in *in */ | ||
1717 | sg_init_one(&sg_in, in->buf, in->len); | ||
1718 | sg_init_one(&sg_out, outval, drbg_blocklen(drbg)); | ||
1719 | ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, in->len); | ||
1720 | |||
1721 | return ret; | ||
1722 | } | ||
1723 | #endif /* CONFIG_CRYPTO_DRBG_CTR */ | ||
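drbg_kcapi_sym() always produces exactly one cipher block of output into outval. A hedged sketch of how the DRBG core could drive it for a single AES block follows; the 16-byte block size and the helper name are assumptions for illustration:

/* Sketch, not from the patch: one AES block through drbg_kcapi_sym(). */
static int drbg_sym_block_sketch(struct drbg_state *drbg,
				 const unsigned char *key,
				 const unsigned char in_block[16],
				 unsigned char out_block[16])
{
	struct drbg_string in;

	/* wrap the input block; drbg_kcapi_sym() consumes a single string */
	drbg_string_fill(&in, in_block, 16);
	return drbg_kcapi_sym(drbg, key, out_block, &in);
}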
1724 | |||
1725 | /*************************************************************** | ||
1726 | * Kernel crypto API interface to register DRBG | ||
1727 | ***************************************************************/ | ||
1728 | |||
1729 | /* | ||
1730 | * Look up the DRBG coreref and prediction resistance flag for a given | ||
1731 | * kernel crypto API cra_driver_name using the drbg_cores definition | ||
1732 | * | ||
1733 | * @cra_driver_name kernel crypto API cra_driver_name of the DRBG instance | ||
1734 | * @coreref reference to integer which is filled with the index of | ||
1735 | * the applicable core | ||
1736 | * @pr reference for setting prediction resistance | ||
1737 | * | ||
1738 | * return: void; results are delivered through @coreref and @pr | ||
1739 | */ | ||
1740 | static inline void drbg_convert_tfm_core(const char *cra_driver_name, | ||
1741 | int *coreref, bool *pr) | ||
1742 | { | ||
1743 | int i = 0; | ||
1744 | size_t start = 0; | ||
1745 | int len = 0; | ||
1746 | |||
1747 | *pr = true; | ||
1748 | /* disassemble the names */ | ||
1749 | if (!memcmp(cra_driver_name, "drbg_nopr_", 10)) { | ||
1750 | start = 10; | ||
1751 | *pr = false; | ||
1752 | } else if (!memcmp(cra_driver_name, "drbg_pr_", 8)) { | ||
1753 | start = 8; | ||
1754 | } else { | ||
1755 | return; | ||
1756 | } | ||
1757 | |||
1758 | /* remove the first part */ | ||
1759 | len = strlen(cra_driver_name) - start; | ||
1760 | for (i = 0; ARRAY_SIZE(drbg_cores) > i; i++) { | ||
1761 | if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name, | ||
1762 | len)) { | ||
1763 | *coreref = i; | ||
1764 | return; | ||
1765 | } | ||
1766 | } | ||
1767 | } | ||
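To make the name disassembly concrete, the expected results for two registered driver names (assuming drbg_cores[] carries cra_name entries such as "hmac_sha256" and "ctr_aes128", as the health check further below suggests) are:

/*
 * drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
 *     -> *pr == false, drbg_cores[*coreref].cra_name == "hmac_sha256"
 *
 * drbg_convert_tfm_core("drbg_pr_ctr_aes128", &coreref, &pr);
 *     -> *pr == true,  drbg_cores[*coreref].cra_name == "ctr_aes128"
 */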
1768 | |||
1769 | static int drbg_kcapi_init(struct crypto_tfm *tfm) | ||
1770 | { | ||
1771 | struct drbg_state *drbg = crypto_tfm_ctx(tfm); | ||
1772 | bool pr = false; | ||
1773 | int coreref = 0; | ||
1774 | |||
1775 | drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr); | ||
1776 | /* | ||
1777 | * when a personalization string is needed, the caller must call reset | ||
1778 | * and provide the personalization string as seed information | ||
1779 | */ | ||
1780 | return drbg_instantiate(drbg, NULL, coreref, pr); | ||
1781 | } | ||
1782 | |||
1783 | static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) | ||
1784 | { | ||
1785 | drbg_uninstantiate(crypto_tfm_ctx(tfm)); | ||
1786 | } | ||
1787 | |||
1788 | /* | ||
1789 | * Generate random numbers when invoked by the kernel crypto API. | ||
1790 | * The standard kernel crypto API RNG interface is extended as follows: | ||
1791 | * | ||
1792 | * If dlen is larger than zero, rdata is interpreted as the output buffer | ||
1793 | * where random data is to be stored. | ||
1794 | * | ||
1795 | * If dlen is zero, rdata is interpreted as a pointer to a struct drbg_gen | ||
1796 | * which holds the additional information string that is used for the | ||
1797 | * DRBG generation process. The output buffer that is to be used to store | ||
1798 | * data is also pointed to by struct drbg_gen. | ||
1799 | */ | ||
1800 | static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata, | ||
1801 | unsigned int dlen) | ||
1802 | { | ||
1803 | struct drbg_state *drbg = crypto_rng_ctx(tfm); | ||
1804 | if (0 < dlen) { | ||
1805 | return drbg_generate_long(drbg, rdata, dlen, NULL); | ||
1806 | } else { | ||
1807 | struct drbg_gen *data = (struct drbg_gen *)rdata; | ||
1808 | struct drbg_string addtl; | ||
1809 | /* catch NULL pointer */ | ||
1810 | if (!data) | ||
1811 | return 0; | ||
1812 | drbg_set_testdata(drbg, data->test_data); | ||
1813 | /* linked list variable is now local to allow modification */ | ||
1814 | drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len); | ||
1815 | return drbg_generate_long(drbg, data->outbuf, data->outlen, | ||
1816 | &addtl); | ||
1817 | } | ||
1818 | } | ||
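Both calling conventions are reachable through crypto_rng_get_bytes() from <crypto/rng.h>. A minimal sketch, assuming the struct drbg_gen layout used above (outbuf, outlen, addtl, test_data); the function name is a placeholder:

/* Sketch, not from the patch: the two invocation styles of the RNG. */
static int drbg_generate_sketch(struct crypto_rng *rng, u8 *out,
				unsigned int outlen,
				struct drbg_string *addtl)
{
	struct drbg_gen gen;

	if (!addtl)
		/* dlen > 0: rdata is the plain output buffer */
		return crypto_rng_get_bytes(rng, out, outlen);

	/* dlen == 0: rdata carries a struct drbg_gen instead */
	gen.outbuf = out;
	gen.outlen = outlen;
	gen.addtl = addtl;
	gen.test_data = NULL;	/* no test entropy injection */
	return crypto_rng_get_bytes(rng, (u8 *)&gen, 0);
}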
1819 | |||
1820 | /* | ||
1821 | * Reset the DRBG invoked by the kernel crypto API | ||
1822 | * The reset implies a full re-initialization of the DRBG. Similar to the | ||
1823 | * generate function of drbg_kcapi_random, this function extends the | ||
1824 | * kernel crypto API interface with struct drbg_gen | ||
1825 | */ | ||
1826 | static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | ||
1827 | { | ||
1828 | struct drbg_state *drbg = crypto_rng_ctx(tfm); | ||
1829 | struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm); | ||
1830 | bool pr = false; | ||
1831 | struct drbg_string seed_string; | ||
1832 | int coreref = 0; | ||
1833 | |||
1834 | drbg_uninstantiate(drbg); | ||
1835 | drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref, | ||
1836 | &pr); | ||
1837 | if (0 < slen) { | ||
1838 | drbg_string_fill(&seed_string, seed, slen); | ||
1839 | return drbg_instantiate(drbg, &seed_string, coreref, pr); | ||
1840 | } else { | ||
1841 | struct drbg_gen *data = (struct drbg_gen *)seed; | ||
1842 | /* allow invocation of API call with NULL, 0 */ | ||
1843 | if (!data) | ||
1844 | return drbg_instantiate(drbg, NULL, coreref, pr); | ||
1845 | drbg_set_testdata(drbg, data->test_data); | ||
1846 | /* linked list variable is now local to allow modification */ | ||
1847 | drbg_string_fill(&seed_string, data->addtl->buf, | ||
1848 | data->addtl->len); | ||
1849 | return drbg_instantiate(drbg, &seed_string, coreref, pr); | ||
1850 | } | ||
1851 | } | ||
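The reset path is reachable through the standard crypto_rng_reset() call. A short sketch, assuming a caller that wants a fresh instance with a personalization string, or the default instantiation when none is supplied:

/* Sketch, not from the patch: re-instantiating with a personalization
 * string, or with defaults when none is supplied. */
static int drbg_personalize_sketch(struct crypto_rng *rng,
				   u8 *pers, unsigned int perslen)
{
	if (!pers || !perslen)
		return crypto_rng_reset(rng, NULL, 0);
	return crypto_rng_reset(rng, pers, perslen);
}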
1852 | |||
1853 | /*************************************************************** | ||
1854 | * Kernel module: code to load the module | ||
1855 | ***************************************************************/ | ||
1856 | |||
1857 | /* | ||
1858 | * Tests as defined in SP800-90A section 11.3.2 in addition to the | ||
1859 | * cipher tests: testing of the error handling. | ||
1860 | * | ||
1861 | * Note: testing of a failing seed source as defined in 11.3.2 is not | ||
1862 | * applicable as the seed source, get_random_bytes, does not fail. | ||
1863 | * | ||
1864 | * Note 2: There is no sensible way of testing the reseed counter | ||
1865 | * enforcement, so skip it. | ||
1866 | */ | ||
1867 | static inline int __init drbg_healthcheck_sanity(void) | ||
1868 | { | ||
1869 | #ifdef CONFIG_CRYPTO_FIPS | ||
1870 | int len = 0; | ||
1871 | #define OUTBUFLEN 16 | ||
1872 | unsigned char buf[OUTBUFLEN]; | ||
1873 | struct drbg_state *drbg = NULL; | ||
1874 | int ret = -EFAULT; | ||
1875 | int rc = -EFAULT; | ||
1876 | bool pr = false; | ||
1877 | int coreref = 0; | ||
1878 | struct drbg_string addtl; | ||
1879 | size_t max_addtllen, max_request_bytes; | ||
1880 | |||
1881 | /* only perform test in FIPS mode */ | ||
1882 | if (!fips_enabled) | ||
1883 | return 0; | ||
1884 | |||
1885 | #ifdef CONFIG_CRYPTO_DRBG_CTR | ||
1886 | drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr); | ||
1887 | #elif defined CONFIG_CRYPTO_DRBG_HASH | ||
1888 | drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr); | ||
1889 | #else | ||
1890 | drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr); | ||
1891 | #endif | ||
1892 | |||
1893 | drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); | ||
1894 | if (!drbg) | ||
1895 | return -ENOMEM; | ||
1896 | |||
1897 | /* | ||
1898 | * if the following tests fail, it is likely that there is a buffer | ||
1899 | * overflow as buf is much smaller than the requested or provided | ||
1900 | * string lengths -- in case the error handling does not succeed | ||
1901 | * we may get an OOPS. And we want to get an OOPS as this is a | ||
1902 | * grave bug. | ||
1903 | */ | ||
1904 | |||
1905 | /* get a valid instance of DRBG for following tests */ | ||
1906 | ret = drbg_instantiate(drbg, NULL, coreref, pr); | ||
1907 | if (ret) { | ||
1908 | rc = ret; | ||
1909 | goto outbuf; | ||
1910 | } | ||
1911 | max_addtllen = drbg_max_addtl(drbg); | ||
1912 | max_request_bytes = drbg_max_request_bytes(drbg); | ||
1913 | drbg_string_fill(&addtl, buf, max_addtllen + 1); | ||
1914 | /* overflow addtllen with additional info string */ | ||
1915 | len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl); | ||
1916 | BUG_ON(0 < len); | ||
1917 | /* overflow max_bits */ | ||
1918 | len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL); | ||
1919 | BUG_ON(0 < len); | ||
1920 | drbg_uninstantiate(drbg); | ||
1921 | |||
1922 | /* overflow max addtllen with personalization string */ | ||
1923 | ret = drbg_instantiate(drbg, &addtl, coreref, pr); | ||
1924 | BUG_ON(0 == ret); | ||
1925 | /* test uninstantiated DRBG */ | ||
1926 | len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL); | ||
1927 | BUG_ON(0 < len); | ||
1928 | /* all tests passed */ | ||
1929 | rc = 0; | ||
1930 | |||
1931 | pr_devel("DRBG: Sanity tests for failure code paths successfully " | ||
1932 | "completed\n"); | ||
1933 | |||
1934 | drbg_uninstantiate(drbg); | ||
1935 | outbuf: | ||
1936 | kzfree(drbg); | ||
1937 | return rc; | ||
1938 | #else /* CONFIG_CRYPTO_FIPS */ | ||
1939 | return 0; | ||
1940 | #endif /* CONFIG_CRYPTO_FIPS */ | ||
1941 | } | ||
1942 | |||
1943 | static struct crypto_alg drbg_algs[22]; | ||
1944 | |||
1945 | /* | ||
1946 | * Fill the array drbg_algs used to register the different DRBGs | ||
1947 | * with the kernel crypto API. To fill the array, the information | ||
1948 | * from drbg_cores[] is used. | ||
1949 | */ | ||
1950 | static inline void __init drbg_fill_array(struct crypto_alg *alg, | ||
1951 | const struct drbg_core *core, int pr) | ||
1952 | { | ||
1953 | int pos = 0; | ||
1954 | static int priority = 100; | ||
1955 | |||
1956 | memset(alg, 0, sizeof(struct crypto_alg)); | ||
1957 | memcpy(alg->cra_name, "stdrng", 6); | ||
1958 | if (pr) { | ||
1959 | memcpy(alg->cra_driver_name, "drbg_pr_", 8); | ||
1960 | pos = 8; | ||
1961 | } else { | ||
1962 | memcpy(alg->cra_driver_name, "drbg_nopr_", 10); | ||
1963 | pos = 10; | ||
1964 | } | ||
1965 | memcpy(alg->cra_driver_name + pos, core->cra_name, | ||
1966 | strlen(core->cra_name)); | ||
1967 | |||
1968 | alg->cra_priority = priority; | ||
1969 | priority++; | ||
1970 | /* | ||
1971 | * If FIPS mode is enabled, the selected DRBG shall have a | ||
1972 | * higher cra_priority than other stdrng instances to ensure | ||
1973 | * it is selected. | ||
1974 | */ | ||
1975 | if (fips_enabled) | ||
1976 | alg->cra_priority += 200; | ||
1977 | |||
1978 | alg->cra_flags = CRYPTO_ALG_TYPE_RNG; | ||
1979 | alg->cra_ctxsize = sizeof(struct drbg_state); | ||
1980 | alg->cra_type = &crypto_rng_type; | ||
1981 | alg->cra_module = THIS_MODULE; | ||
1982 | alg->cra_init = drbg_kcapi_init; | ||
1983 | alg->cra_exit = drbg_kcapi_cleanup; | ||
1984 | alg->cra_u.rng.rng_make_random = drbg_kcapi_random; | ||
1985 | alg->cra_u.rng.rng_reset = drbg_kcapi_reset; | ||
1986 | alg->cra_u.rng.seedsize = 0; | ||
1987 | } | ||
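The net effect for one core entry, taking a hypothetical drbg_cores[i].cra_name of "ctr_aes128" as the example:

/*
 * cra_name        = "stdrng"                 (shared by all instances)
 * cra_driver_name = "drbg_pr_ctr_aes128"     for pr == 1
 *                   "drbg_nopr_ctr_aes128"   for pr == 0
 * cra_priority    = 100, 101, ...            (+200 when fips_enabled)
 *
 * so crypto_alloc_rng("stdrng", 0, 0) resolves to the registered
 * instance with the highest cra_priority.
 */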
1988 | |||
1989 | static int __init drbg_init(void) | ||
1990 | { | ||
1991 | unsigned int i = 0; /* index into drbg_algs */ | ||
1992 | unsigned int j = 0; /* index into drbg_cores */ | ||
1993 | int ret = -EFAULT; | ||
1994 | |||
1995 | ret = drbg_healthcheck_sanity(); | ||
1996 | if (ret) | ||
1997 | return ret; | ||
1998 | |||
1999 | if (ARRAY_SIZE(drbg_cores) * 2 > ARRAY_SIZE(drbg_algs)) { | ||
2000 | pr_info("DRBG: Cannot register all DRBG types" | ||
2001 | " (slots needed: %zu, slots available: %zu)\n", | ||
2002 | ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs)); | ||
2003 | return ret; | ||
2004 | } | ||
2005 | |||
2006 | /* | ||
2007 | * each DRBG definition can be used with PR and without PR, thus | ||
2008 | * we instantiate each DRBG in drbg_cores[] twice. | ||
2009 | * | ||
2010 | * As the order of placing them into the drbg_algs array matters | ||
2011 | * (the later DRBGs receive a higher cra_priority) we register the | ||
2012 | * prediction resistance DRBGs first as they should not be too | ||
2013 | * interesting. | ||
2014 | */ | ||
2015 | for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++) | ||
2016 | drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1); | ||
2017 | for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++) | ||
2018 | drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0); | ||
2019 | return crypto_register_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); | ||
2020 | } | ||
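For the slot arithmetic, a worked example (the count of 11 cores is an assumption for illustration, not taken from the patch):

/*
 * ARRAY_SIZE(drbg_cores) == 11
 *   first loop:  drbg_algs[0] ... drbg_algs[10]   pr variants
 *   second loop: drbg_algs[11] ... drbg_algs[21]  nopr variants
 *   crypto_register_algs(drbg_algs, 22)
 *
 * exactly filling the 22 statically provided slots.
 */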
2021 | |||
2022 | static void __exit drbg_exit(void) | ||
2023 | { | ||
2024 | crypto_unregister_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); | ||
2025 | } | ||
2026 | |||
2027 | module_init(drbg_init); | ||
2028 | module_exit(drbg_exit); | ||
2029 | #ifndef CRYPTO_DRBG_HASH_STRING | ||
2030 | #define CRYPTO_DRBG_HASH_STRING "" | ||
2031 | #endif | ||
2032 | #ifndef CRYPTO_DRBG_HMAC_STRING | ||
2033 | #define CRYPTO_DRBG_HMAC_STRING "" | ||
2034 | #endif | ||
2035 | #ifndef CRYPTO_DRBG_CTR_STRING | ||
2036 | #define CRYPTO_DRBG_CTR_STRING "" | ||
2037 | #endif | ||
2038 | MODULE_LICENSE("GPL"); | ||
2039 | MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); | ||
2040 | MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) " | ||
2041 | "using following cores: " | ||
2042 | CRYPTO_DRBG_HASH_STRING | ||
2043 | CRYPTO_DRBG_HMAC_STRING | ||
2044 | CRYPTO_DRBG_CTR_STRING); | ||
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c index 42ce9f570aec..bf7ab4a89493 100644 --- a/crypto/eseqiv.c +++ b/crypto/eseqiv.c | |||
@@ -68,7 +68,7 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) | |||
68 | struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 68 | struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
69 | struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); | 69 | struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); |
70 | struct ablkcipher_request *subreq; | 70 | struct ablkcipher_request *subreq; |
71 | crypto_completion_t complete; | 71 | crypto_completion_t compl; |
72 | void *data; | 72 | void *data; |
73 | struct scatterlist *osrc, *odst; | 73 | struct scatterlist *osrc, *odst; |
74 | struct scatterlist *dst; | 74 | struct scatterlist *dst; |
@@ -86,7 +86,7 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) | |||
86 | ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); | 86 | ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); |
87 | 87 | ||
88 | giv = req->giv; | 88 | giv = req->giv; |
89 | complete = req->creq.base.complete; | 89 | compl = req->creq.base.complete; |
90 | data = req->creq.base.data; | 90 | data = req->creq.base.data; |
91 | 91 | ||
92 | osrc = req->creq.src; | 92 | osrc = req->creq.src; |
@@ -101,11 +101,11 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) | |||
101 | if (vsrc != giv + ivsize && vdst != giv + ivsize) { | 101 | if (vsrc != giv + ivsize && vdst != giv + ivsize) { |
102 | giv = PTR_ALIGN((u8 *)reqctx->tail, | 102 | giv = PTR_ALIGN((u8 *)reqctx->tail, |
103 | crypto_ablkcipher_alignmask(geniv) + 1); | 103 | crypto_ablkcipher_alignmask(geniv) + 1); |
104 | complete = eseqiv_complete; | 104 | compl = eseqiv_complete; |
105 | data = req; | 105 | data = req; |
106 | } | 106 | } |
107 | 107 | ||
108 | ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete, | 108 | ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl, |
109 | data); | 109 | data); |
110 | 110 | ||
111 | sg_init_table(reqctx->src, 2); | 111 | sg_init_table(reqctx->src, 2); |
diff --git a/crypto/gcm.c b/crypto/gcm.c index b4f017939004..276cdac567b6 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -228,14 +228,14 @@ static void gcm_hash_final_done(struct crypto_async_request *areq, int err); | |||
228 | 228 | ||
229 | static int gcm_hash_update(struct aead_request *req, | 229 | static int gcm_hash_update(struct aead_request *req, |
230 | struct crypto_gcm_req_priv_ctx *pctx, | 230 | struct crypto_gcm_req_priv_ctx *pctx, |
231 | crypto_completion_t complete, | 231 | crypto_completion_t compl, |
232 | struct scatterlist *src, | 232 | struct scatterlist *src, |
233 | unsigned int len) | 233 | unsigned int len) |
234 | { | 234 | { |
235 | struct ahash_request *ahreq = &pctx->u.ahreq; | 235 | struct ahash_request *ahreq = &pctx->u.ahreq; |
236 | 236 | ||
237 | ahash_request_set_callback(ahreq, aead_request_flags(req), | 237 | ahash_request_set_callback(ahreq, aead_request_flags(req), |
238 | complete, req); | 238 | compl, req); |
239 | ahash_request_set_crypt(ahreq, src, NULL, len); | 239 | ahash_request_set_crypt(ahreq, src, NULL, len); |
240 | 240 | ||
241 | return crypto_ahash_update(ahreq); | 241 | return crypto_ahash_update(ahreq); |
@@ -244,12 +244,12 @@ static int gcm_hash_update(struct aead_request *req, | |||
244 | static int gcm_hash_remain(struct aead_request *req, | 244 | static int gcm_hash_remain(struct aead_request *req, |
245 | struct crypto_gcm_req_priv_ctx *pctx, | 245 | struct crypto_gcm_req_priv_ctx *pctx, |
246 | unsigned int remain, | 246 | unsigned int remain, |
247 | crypto_completion_t complete) | 247 | crypto_completion_t compl) |
248 | { | 248 | { |
249 | struct ahash_request *ahreq = &pctx->u.ahreq; | 249 | struct ahash_request *ahreq = &pctx->u.ahreq; |
250 | 250 | ||
251 | ahash_request_set_callback(ahreq, aead_request_flags(req), | 251 | ahash_request_set_callback(ahreq, aead_request_flags(req), |
252 | complete, req); | 252 | compl, req); |
253 | sg_init_one(pctx->src, gcm_zeroes, remain); | 253 | sg_init_one(pctx->src, gcm_zeroes, remain); |
254 | ahash_request_set_crypt(ahreq, pctx->src, NULL, remain); | 254 | ahash_request_set_crypt(ahreq, pctx->src, NULL, remain); |
255 | 255 | ||
@@ -375,14 +375,14 @@ static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err) | |||
375 | { | 375 | { |
376 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 376 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
377 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 377 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
378 | crypto_completion_t complete; | 378 | crypto_completion_t compl; |
379 | unsigned int remain = 0; | 379 | unsigned int remain = 0; |
380 | 380 | ||
381 | if (!err && gctx->cryptlen) { | 381 | if (!err && gctx->cryptlen) { |
382 | remain = gcm_remain(gctx->cryptlen); | 382 | remain = gcm_remain(gctx->cryptlen); |
383 | complete = remain ? gcm_hash_crypt_done : | 383 | compl = remain ? gcm_hash_crypt_done : |
384 | gcm_hash_crypt_remain_done; | 384 | gcm_hash_crypt_remain_done; |
385 | err = gcm_hash_update(req, pctx, complete, | 385 | err = gcm_hash_update(req, pctx, compl, |
386 | gctx->src, gctx->cryptlen); | 386 | gctx->src, gctx->cryptlen); |
387 | if (err == -EINPROGRESS || err == -EBUSY) | 387 | if (err == -EINPROGRESS || err == -EBUSY) |
388 | return; | 388 | return; |
@@ -429,14 +429,14 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err) | |||
429 | static void __gcm_hash_init_done(struct aead_request *req, int err) | 429 | static void __gcm_hash_init_done(struct aead_request *req, int err) |
430 | { | 430 | { |
431 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 431 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
432 | crypto_completion_t complete; | 432 | crypto_completion_t compl; |
433 | unsigned int remain = 0; | 433 | unsigned int remain = 0; |
434 | 434 | ||
435 | if (!err && req->assoclen) { | 435 | if (!err && req->assoclen) { |
436 | remain = gcm_remain(req->assoclen); | 436 | remain = gcm_remain(req->assoclen); |
437 | complete = remain ? gcm_hash_assoc_done : | 437 | compl = remain ? gcm_hash_assoc_done : |
438 | gcm_hash_assoc_remain_done; | 438 | gcm_hash_assoc_remain_done; |
439 | err = gcm_hash_update(req, pctx, complete, | 439 | err = gcm_hash_update(req, pctx, compl, |
440 | req->assoc, req->assoclen); | 440 | req->assoc, req->assoclen); |
441 | if (err == -EINPROGRESS || err == -EBUSY) | 441 | if (err == -EINPROGRESS || err == -EBUSY) |
442 | return; | 442 | return; |
@@ -462,7 +462,7 @@ static int gcm_hash(struct aead_request *req, | |||
462 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 462 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
463 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 463 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
464 | unsigned int remain; | 464 | unsigned int remain; |
465 | crypto_completion_t complete; | 465 | crypto_completion_t compl; |
466 | int err; | 466 | int err; |
467 | 467 | ||
468 | ahash_request_set_tfm(ahreq, ctx->ghash); | 468 | ahash_request_set_tfm(ahreq, ctx->ghash); |
@@ -473,8 +473,8 @@ static int gcm_hash(struct aead_request *req, | |||
473 | if (err) | 473 | if (err) |
474 | return err; | 474 | return err; |
475 | remain = gcm_remain(req->assoclen); | 475 | remain = gcm_remain(req->assoclen); |
476 | complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done; | 476 | compl = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done; |
477 | err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen); | 477 | err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen); |
478 | if (err) | 478 | if (err) |
479 | return err; | 479 | return err; |
480 | if (remain) { | 480 | if (remain) { |
@@ -484,8 +484,8 @@ static int gcm_hash(struct aead_request *req, | |||
484 | return err; | 484 | return err; |
485 | } | 485 | } |
486 | remain = gcm_remain(gctx->cryptlen); | 486 | remain = gcm_remain(gctx->cryptlen); |
487 | complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done; | 487 | compl = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done; |
488 | err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen); | 488 | err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen); |
489 | if (err) | 489 | if (err) |
490 | return err; | 490 | return err; |
491 | if (remain) { | 491 | if (remain) { |
diff --git a/crypto/lzo.c b/crypto/lzo.c index 1c2aa69c54b8..a8ff2f778dc4 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/crypto.h> | 21 | #include <linux/crypto.h> |
22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
23 | #include <linux/mm.h> | ||
23 | #include <linux/lzo.h> | 24 | #include <linux/lzo.h> |
24 | 25 | ||
25 | struct lzo_ctx { | 26 | struct lzo_ctx { |
@@ -30,7 +31,10 @@ static int lzo_init(struct crypto_tfm *tfm) | |||
30 | { | 31 | { |
31 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | 32 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); |
32 | 33 | ||
33 | ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS); | 34 | ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS, |
35 | GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); | ||
36 | if (!ctx->lzo_comp_mem) | ||
37 | ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS); | ||
34 | if (!ctx->lzo_comp_mem) | 38 | if (!ctx->lzo_comp_mem) |
35 | return -ENOMEM; | 39 | return -ENOMEM; |
36 | 40 | ||
@@ -41,7 +45,7 @@ static void lzo_exit(struct crypto_tfm *tfm) | |||
41 | { | 45 | { |
42 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | 46 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); |
43 | 47 | ||
44 | vfree(ctx->lzo_comp_mem); | 48 | kvfree(ctx->lzo_comp_mem); |
45 | } | 49 | } |
46 | 50 | ||
47 | static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, | 51 | static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, |
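The lzo change above is an instance of a common fallback pattern: try a physically contiguous kmalloc() first (quietly, hence __GFP_NOWARN), fall back to vmalloc(), and free with kvfree(), which dispatches to the matching deallocator. A generic sketch of the same idiom, with a hypothetical helper name:

/* Sketch, not from the patch: contiguous-first allocation with fallback. */
static void *alloc_large_buf_sketch(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

	return p ? p : vmalloc(size);	/* pair with kvfree() */
}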
diff --git a/crypto/seqiv.c b/crypto/seqiv.c index f2cba4ed6f25..ee190fcedcd2 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c | |||
@@ -100,7 +100,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) | |||
100 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 100 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
101 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 101 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
102 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); | 102 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); |
103 | crypto_completion_t complete; | 103 | crypto_completion_t compl; |
104 | void *data; | 104 | void *data; |
105 | u8 *info; | 105 | u8 *info; |
106 | unsigned int ivsize; | 106 | unsigned int ivsize; |
@@ -108,7 +108,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) | |||
108 | 108 | ||
109 | ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); | 109 | ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); |
110 | 110 | ||
111 | complete = req->creq.base.complete; | 111 | compl = req->creq.base.complete; |
112 | data = req->creq.base.data; | 112 | data = req->creq.base.data; |
113 | info = req->creq.info; | 113 | info = req->creq.info; |
114 | 114 | ||
@@ -122,11 +122,11 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) | |||
122 | if (!info) | 122 | if (!info) |
123 | return -ENOMEM; | 123 | return -ENOMEM; |
124 | 124 | ||
125 | complete = seqiv_complete; | 125 | compl = seqiv_complete; |
126 | data = req; | 126 | data = req; |
127 | } | 127 | } |
128 | 128 | ||
129 | ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete, | 129 | ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl, |
130 | data); | 130 | data); |
131 | ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, | 131 | ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, |
132 | req->creq.nbytes, info); | 132 | req->creq.nbytes, info); |
@@ -146,7 +146,7 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) | |||
146 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); | 146 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); |
147 | struct aead_request *areq = &req->areq; | 147 | struct aead_request *areq = &req->areq; |
148 | struct aead_request *subreq = aead_givcrypt_reqctx(req); | 148 | struct aead_request *subreq = aead_givcrypt_reqctx(req); |
149 | crypto_completion_t complete; | 149 | crypto_completion_t compl; |
150 | void *data; | 150 | void *data; |
151 | u8 *info; | 151 | u8 *info; |
152 | unsigned int ivsize; | 152 | unsigned int ivsize; |
@@ -154,7 +154,7 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) | |||
154 | 154 | ||
155 | aead_request_set_tfm(subreq, aead_geniv_base(geniv)); | 155 | aead_request_set_tfm(subreq, aead_geniv_base(geniv)); |
156 | 156 | ||
157 | complete = areq->base.complete; | 157 | compl = areq->base.complete; |
158 | data = areq->base.data; | 158 | data = areq->base.data; |
159 | info = areq->iv; | 159 | info = areq->iv; |
160 | 160 | ||
@@ -168,11 +168,11 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) | |||
168 | if (!info) | 168 | if (!info) |
169 | return -ENOMEM; | 169 | return -ENOMEM; |
170 | 170 | ||
171 | complete = seqiv_aead_complete; | 171 | compl = seqiv_aead_complete; |
172 | data = req; | 172 | data = req; |
173 | } | 173 | } |
174 | 174 | ||
175 | aead_request_set_callback(subreq, areq->base.flags, complete, data); | 175 | aead_request_set_callback(subreq, areq->base.flags, compl, data); |
176 | aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen, | 176 | aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen, |
177 | info); | 177 | info); |
178 | aead_request_set_assoc(subreq, areq->assoc, areq->assoclen); | 178 | aead_request_set_assoc(subreq, areq->assoc, areq->assoclen); |
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index ba247cf30858..890449e6e7ef 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -48,6 +48,11 @@ | |||
48 | #define DECRYPT 0 | 48 | #define DECRYPT 0 |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * return a string with the driver name | ||
52 | */ | ||
53 | #define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm)) | ||
54 | |||
55 | /* | ||
51 | * Used by test_cipher_speed() | 56 | * Used by test_cipher_speed() |
52 | */ | 57 | */ |
53 | static unsigned int sec; | 58 | static unsigned int sec; |
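The token pasting in get_driver_name() keeps the call sites short. What it expands to for an AEAD handle, for example (the concrete driver name shown is an assumption; it depends on which implementation the crypto API binds):

/*
 * get_driver_name(crypto_aead, tfm)
 *     -> crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm))
 *
 * which reports the driver actually bound to the handle, e.g.
 * "gcm_base(ctr(aes-generic),ghash-generic)" rather than just "gcm(aes)".
 */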
@@ -68,13 +73,13 @@ static char *check[] = { | |||
68 | }; | 73 | }; |
69 | 74 | ||
70 | static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, | 75 | static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, |
71 | struct scatterlist *sg, int blen, int sec) | 76 | struct scatterlist *sg, int blen, int secs) |
72 | { | 77 | { |
73 | unsigned long start, end; | 78 | unsigned long start, end; |
74 | int bcount; | 79 | int bcount; |
75 | int ret; | 80 | int ret; |
76 | 81 | ||
77 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 82 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
78 | time_before(jiffies, end); bcount++) { | 83 | time_before(jiffies, end); bcount++) { |
79 | if (enc) | 84 | if (enc) |
80 | ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); | 85 | ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); |
@@ -86,7 +91,7 @@ static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, | |||
86 | } | 91 | } |
87 | 92 | ||
88 | printk("%d operations in %d seconds (%ld bytes)\n", | 93 | printk("%d operations in %d seconds (%ld bytes)\n", |
89 | bcount, sec, (long)bcount * blen); | 94 | bcount, secs, (long)bcount * blen); |
90 | return 0; | 95 | return 0; |
91 | } | 96 | } |
92 | 97 | ||
@@ -138,13 +143,13 @@ out: | |||
138 | } | 143 | } |
139 | 144 | ||
140 | static int test_aead_jiffies(struct aead_request *req, int enc, | 145 | static int test_aead_jiffies(struct aead_request *req, int enc, |
141 | int blen, int sec) | 146 | int blen, int secs) |
142 | { | 147 | { |
143 | unsigned long start, end; | 148 | unsigned long start, end; |
144 | int bcount; | 149 | int bcount; |
145 | int ret; | 150 | int ret; |
146 | 151 | ||
147 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 152 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
148 | time_before(jiffies, end); bcount++) { | 153 | time_before(jiffies, end); bcount++) { |
149 | if (enc) | 154 | if (enc) |
150 | ret = crypto_aead_encrypt(req); | 155 | ret = crypto_aead_encrypt(req); |
@@ -156,7 +161,7 @@ static int test_aead_jiffies(struct aead_request *req, int enc, | |||
156 | } | 161 | } |
157 | 162 | ||
158 | printk("%d operations in %d seconds (%ld bytes)\n", | 163 | printk("%d operations in %d seconds (%ld bytes)\n", |
159 | bcount, sec, (long)bcount * blen); | 164 | bcount, secs, (long)bcount * blen); |
160 | return 0; | 165 | return 0; |
161 | } | 166 | } |
162 | 167 | ||
@@ -260,7 +265,7 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], | |||
260 | } | 265 | } |
261 | } | 266 | } |
262 | 267 | ||
263 | static void test_aead_speed(const char *algo, int enc, unsigned int sec, | 268 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, |
264 | struct aead_speed_template *template, | 269 | struct aead_speed_template *template, |
265 | unsigned int tcount, u8 authsize, | 270 | unsigned int tcount, u8 authsize, |
266 | unsigned int aad_size, u8 *keysize) | 271 | unsigned int aad_size, u8 *keysize) |
@@ -305,9 +310,6 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec, | |||
305 | asg = &sg[8]; | 310 | asg = &sg[8]; |
306 | sgout = &asg[8]; | 311 | sgout = &asg[8]; |
307 | 312 | ||
308 | |||
309 | printk(KERN_INFO "\ntesting speed of %s %s\n", algo, e); | ||
310 | |||
311 | tfm = crypto_alloc_aead(algo, 0, 0); | 313 | tfm = crypto_alloc_aead(algo, 0, 0); |
312 | 314 | ||
313 | if (IS_ERR(tfm)) { | 315 | if (IS_ERR(tfm)) { |
@@ -316,6 +318,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec, | |||
316 | goto out_notfm; | 318 | goto out_notfm; |
317 | } | 319 | } |
318 | 320 | ||
321 | printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, | ||
322 | get_driver_name(crypto_aead, tfm), e); | ||
323 | |||
319 | req = aead_request_alloc(tfm, GFP_KERNEL); | 324 | req = aead_request_alloc(tfm, GFP_KERNEL); |
320 | if (!req) { | 325 | if (!req) { |
321 | pr_err("alg: aead: Failed to allocate request for %s\n", | 326 | pr_err("alg: aead: Failed to allocate request for %s\n", |
@@ -374,8 +379,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec, | |||
374 | aead_request_set_crypt(req, sg, sgout, *b_size, iv); | 379 | aead_request_set_crypt(req, sg, sgout, *b_size, iv); |
375 | aead_request_set_assoc(req, asg, aad_size); | 380 | aead_request_set_assoc(req, asg, aad_size); |
376 | 381 | ||
377 | if (sec) | 382 | if (secs) |
378 | ret = test_aead_jiffies(req, enc, *b_size, sec); | 383 | ret = test_aead_jiffies(req, enc, *b_size, |
384 | secs); | ||
379 | else | 385 | else |
380 | ret = test_aead_cycles(req, enc, *b_size); | 386 | ret = test_aead_cycles(req, enc, *b_size); |
381 | 387 | ||
@@ -405,7 +411,7 @@ out_noxbuf: | |||
405 | return; | 411 | return; |
406 | } | 412 | } |
407 | 413 | ||
408 | static void test_cipher_speed(const char *algo, int enc, unsigned int sec, | 414 | static void test_cipher_speed(const char *algo, int enc, unsigned int secs, |
409 | struct cipher_speed_template *template, | 415 | struct cipher_speed_template *template, |
410 | unsigned int tcount, u8 *keysize) | 416 | unsigned int tcount, u8 *keysize) |
411 | { | 417 | { |
@@ -422,8 +428,6 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec, | |||
422 | else | 428 | else |
423 | e = "decryption"; | 429 | e = "decryption"; |
424 | 430 | ||
425 | printk("\ntesting speed of %s %s\n", algo, e); | ||
426 | |||
427 | tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); | 431 | tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); |
428 | 432 | ||
429 | if (IS_ERR(tfm)) { | 433 | if (IS_ERR(tfm)) { |
@@ -434,6 +438,9 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec, | |||
434 | desc.tfm = tfm; | 438 | desc.tfm = tfm; |
435 | desc.flags = 0; | 439 | desc.flags = 0; |
436 | 440 | ||
441 | printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, | ||
442 | get_driver_name(crypto_blkcipher, tfm), e); | ||
443 | |||
437 | i = 0; | 444 | i = 0; |
438 | do { | 445 | do { |
439 | 446 | ||
@@ -483,9 +490,9 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec, | |||
483 | crypto_blkcipher_set_iv(tfm, iv, iv_len); | 490 | crypto_blkcipher_set_iv(tfm, iv, iv_len); |
484 | } | 491 | } |
485 | 492 | ||
486 | if (sec) | 493 | if (secs) |
487 | ret = test_cipher_jiffies(&desc, enc, sg, | 494 | ret = test_cipher_jiffies(&desc, enc, sg, |
488 | *b_size, sec); | 495 | *b_size, secs); |
489 | else | 496 | else |
490 | ret = test_cipher_cycles(&desc, enc, sg, | 497 | ret = test_cipher_cycles(&desc, enc, sg, |
491 | *b_size); | 498 | *b_size); |
@@ -506,13 +513,13 @@ out: | |||
506 | 513 | ||
507 | static int test_hash_jiffies_digest(struct hash_desc *desc, | 514 | static int test_hash_jiffies_digest(struct hash_desc *desc, |
508 | struct scatterlist *sg, int blen, | 515 | struct scatterlist *sg, int blen, |
509 | char *out, int sec) | 516 | char *out, int secs) |
510 | { | 517 | { |
511 | unsigned long start, end; | 518 | unsigned long start, end; |
512 | int bcount; | 519 | int bcount; |
513 | int ret; | 520 | int ret; |
514 | 521 | ||
515 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 522 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
516 | time_before(jiffies, end); bcount++) { | 523 | time_before(jiffies, end); bcount++) { |
517 | ret = crypto_hash_digest(desc, sg, blen, out); | 524 | ret = crypto_hash_digest(desc, sg, blen, out); |
518 | if (ret) | 525 | if (ret) |
@@ -520,22 +527,22 @@ static int test_hash_jiffies_digest(struct hash_desc *desc, | |||
520 | } | 527 | } |
521 | 528 | ||
522 | printk("%6u opers/sec, %9lu bytes/sec\n", | 529 | printk("%6u opers/sec, %9lu bytes/sec\n", |
523 | bcount / sec, ((long)bcount * blen) / sec); | 530 | bcount / secs, ((long)bcount * blen) / secs); |
524 | 531 | ||
525 | return 0; | 532 | return 0; |
526 | } | 533 | } |
527 | 534 | ||
528 | static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg, | 535 | static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg, |
529 | int blen, int plen, char *out, int sec) | 536 | int blen, int plen, char *out, int secs) |
530 | { | 537 | { |
531 | unsigned long start, end; | 538 | unsigned long start, end; |
532 | int bcount, pcount; | 539 | int bcount, pcount; |
533 | int ret; | 540 | int ret; |
534 | 541 | ||
535 | if (plen == blen) | 542 | if (plen == blen) |
536 | return test_hash_jiffies_digest(desc, sg, blen, out, sec); | 543 | return test_hash_jiffies_digest(desc, sg, blen, out, secs); |
537 | 544 | ||
538 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 545 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
539 | time_before(jiffies, end); bcount++) { | 546 | time_before(jiffies, end); bcount++) { |
540 | ret = crypto_hash_init(desc); | 547 | ret = crypto_hash_init(desc); |
541 | if (ret) | 548 | if (ret) |
@@ -552,7 +559,7 @@ static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg, | |||
552 | } | 559 | } |
553 | 560 | ||
554 | printk("%6u opers/sec, %9lu bytes/sec\n", | 561 | printk("%6u opers/sec, %9lu bytes/sec\n", |
555 | bcount / sec, ((long)bcount * blen) / sec); | 562 | bcount / secs, ((long)bcount * blen) / secs); |
556 | 563 | ||
557 | return 0; | 564 | return 0; |
558 | } | 565 | } |
@@ -673,7 +680,7 @@ static void test_hash_sg_init(struct scatterlist *sg) | |||
673 | } | 680 | } |
674 | } | 681 | } |
675 | 682 | ||
676 | static void test_hash_speed(const char *algo, unsigned int sec, | 683 | static void test_hash_speed(const char *algo, unsigned int secs, |
677 | struct hash_speed *speed) | 684 | struct hash_speed *speed) |
678 | { | 685 | { |
679 | struct scatterlist sg[TVMEMSIZE]; | 686 | struct scatterlist sg[TVMEMSIZE]; |
@@ -683,8 +690,6 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
683 | int i; | 690 | int i; |
684 | int ret; | 691 | int ret; |
685 | 692 | ||
686 | printk(KERN_INFO "\ntesting speed of %s\n", algo); | ||
687 | |||
688 | tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); | 693 | tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); |
689 | 694 | ||
690 | if (IS_ERR(tfm)) { | 695 | if (IS_ERR(tfm)) { |
@@ -693,6 +698,9 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
693 | return; | 698 | return; |
694 | } | 699 | } |
695 | 700 | ||
701 | printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo, | ||
702 | get_driver_name(crypto_hash, tfm)); | ||
703 | |||
696 | desc.tfm = tfm; | 704 | desc.tfm = tfm; |
697 | desc.flags = 0; | 705 | desc.flags = 0; |
698 | 706 | ||
@@ -718,9 +726,9 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
718 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", | 726 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", |
719 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); | 727 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); |
720 | 728 | ||
721 | if (sec) | 729 | if (secs) |
722 | ret = test_hash_jiffies(&desc, sg, speed[i].blen, | 730 | ret = test_hash_jiffies(&desc, sg, speed[i].blen, |
723 | speed[i].plen, output, sec); | 731 | speed[i].plen, output, secs); |
724 | else | 732 | else |
725 | ret = test_hash_cycles(&desc, sg, speed[i].blen, | 733 | ret = test_hash_cycles(&desc, sg, speed[i].blen, |
726 | speed[i].plen, output); | 734 | speed[i].plen, output); |
@@ -765,13 +773,13 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) | |||
765 | } | 773 | } |
766 | 774 | ||
767 | static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, | 775 | static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, |
768 | char *out, int sec) | 776 | char *out, int secs) |
769 | { | 777 | { |
770 | unsigned long start, end; | 778 | unsigned long start, end; |
771 | int bcount; | 779 | int bcount; |
772 | int ret; | 780 | int ret; |
773 | 781 | ||
774 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 782 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
775 | time_before(jiffies, end); bcount++) { | 783 | time_before(jiffies, end); bcount++) { |
776 | ret = do_one_ahash_op(req, crypto_ahash_digest(req)); | 784 | ret = do_one_ahash_op(req, crypto_ahash_digest(req)); |
777 | if (ret) | 785 | if (ret) |
@@ -779,22 +787,22 @@ static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, | |||
779 | } | 787 | } |
780 | 788 | ||
781 | printk("%6u opers/sec, %9lu bytes/sec\n", | 789 | printk("%6u opers/sec, %9lu bytes/sec\n", |
782 | bcount / sec, ((long)bcount * blen) / sec); | 790 | bcount / secs, ((long)bcount * blen) / secs); |
783 | 791 | ||
784 | return 0; | 792 | return 0; |
785 | } | 793 | } |
786 | 794 | ||
787 | static int test_ahash_jiffies(struct ahash_request *req, int blen, | 795 | static int test_ahash_jiffies(struct ahash_request *req, int blen, |
788 | int plen, char *out, int sec) | 796 | int plen, char *out, int secs) |
789 | { | 797 | { |
790 | unsigned long start, end; | 798 | unsigned long start, end; |
791 | int bcount, pcount; | 799 | int bcount, pcount; |
792 | int ret; | 800 | int ret; |
793 | 801 | ||
794 | if (plen == blen) | 802 | if (plen == blen) |
795 | return test_ahash_jiffies_digest(req, blen, out, sec); | 803 | return test_ahash_jiffies_digest(req, blen, out, secs); |
796 | 804 | ||
797 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 805 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
798 | time_before(jiffies, end); bcount++) { | 806 | time_before(jiffies, end); bcount++) { |
799 | ret = crypto_ahash_init(req); | 807 | ret = crypto_ahash_init(req); |
800 | if (ret) | 808 | if (ret) |
@@ -811,7 +819,7 @@ static int test_ahash_jiffies(struct ahash_request *req, int blen, | |||
811 | } | 819 | } |
812 | 820 | ||
813 | pr_cont("%6u opers/sec, %9lu bytes/sec\n", | 821 | pr_cont("%6u opers/sec, %9lu bytes/sec\n", |
814 | bcount / sec, ((long)bcount * blen) / sec); | 822 | bcount / secs, ((long)bcount * blen) / secs); |
815 | 823 | ||
816 | return 0; | 824 | return 0; |
817 | } | 825 | } |
@@ -911,7 +919,7 @@ out: | |||
911 | return 0; | 919 | return 0; |
912 | } | 920 | } |
913 | 921 | ||
914 | static void test_ahash_speed(const char *algo, unsigned int sec, | 922 | static void test_ahash_speed(const char *algo, unsigned int secs, |
915 | struct hash_speed *speed) | 923 | struct hash_speed *speed) |
916 | { | 924 | { |
917 | struct scatterlist sg[TVMEMSIZE]; | 925 | struct scatterlist sg[TVMEMSIZE]; |
@@ -921,8 +929,6 @@ static void test_ahash_speed(const char *algo, unsigned int sec, | |||
921 | static char output[1024]; | 929 | static char output[1024]; |
922 | int i, ret; | 930 | int i, ret; |
923 | 931 | ||
924 | printk(KERN_INFO "\ntesting speed of async %s\n", algo); | ||
925 | |||
926 | tfm = crypto_alloc_ahash(algo, 0, 0); | 932 | tfm = crypto_alloc_ahash(algo, 0, 0); |
927 | if (IS_ERR(tfm)) { | 933 | if (IS_ERR(tfm)) { |
928 | pr_err("failed to load transform for %s: %ld\n", | 934 | pr_err("failed to load transform for %s: %ld\n", |
@@ -930,6 +936,9 @@ static void test_ahash_speed(const char *algo, unsigned int sec, | |||
930 | return; | 936 | return; |
931 | } | 937 | } |
932 | 938 | ||
939 | printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo, | ||
940 | get_driver_name(crypto_ahash, tfm)); | ||
941 | |||
933 | if (crypto_ahash_digestsize(tfm) > sizeof(output)) { | 942 | if (crypto_ahash_digestsize(tfm) > sizeof(output)) { |
934 | pr_err("digestsize(%u) > outputbuffer(%zu)\n", | 943 | pr_err("digestsize(%u) > outputbuffer(%zu)\n", |
935 | crypto_ahash_digestsize(tfm), sizeof(output)); | 944 | crypto_ahash_digestsize(tfm), sizeof(output)); |
@@ -960,9 +969,9 @@ static void test_ahash_speed(const char *algo, unsigned int sec, | |||
960 | 969 | ||
961 | ahash_request_set_crypt(req, sg, output, speed[i].plen); | 970 | ahash_request_set_crypt(req, sg, output, speed[i].plen); |
962 | 971 | ||
963 | if (sec) | 972 | if (secs) |
964 | ret = test_ahash_jiffies(req, speed[i].blen, | 973 | ret = test_ahash_jiffies(req, speed[i].blen, |
965 | speed[i].plen, output, sec); | 974 | speed[i].plen, output, secs); |
966 | else | 975 | else |
967 | ret = test_ahash_cycles(req, speed[i].blen, | 976 | ret = test_ahash_cycles(req, speed[i].blen, |
968 | speed[i].plen, output); | 977 | speed[i].plen, output); |
@@ -994,13 +1003,13 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret) | |||
994 | } | 1003 | } |
995 | 1004 | ||
996 | static int test_acipher_jiffies(struct ablkcipher_request *req, int enc, | 1005 | static int test_acipher_jiffies(struct ablkcipher_request *req, int enc, |
997 | int blen, int sec) | 1006 | int blen, int secs) |
998 | { | 1007 | { |
999 | unsigned long start, end; | 1008 | unsigned long start, end; |
1000 | int bcount; | 1009 | int bcount; |
1001 | int ret; | 1010 | int ret; |
1002 | 1011 | ||
1003 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 1012 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
1004 | time_before(jiffies, end); bcount++) { | 1013 | time_before(jiffies, end); bcount++) { |
1005 | if (enc) | 1014 | if (enc) |
1006 | ret = do_one_acipher_op(req, | 1015 | ret = do_one_acipher_op(req, |
@@ -1014,7 +1023,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc, | |||
1014 | } | 1023 | } |
1015 | 1024 | ||
1016 | pr_cont("%d operations in %d seconds (%ld bytes)\n", | 1025 | pr_cont("%d operations in %d seconds (%ld bytes)\n", |
1017 | bcount, sec, (long)bcount * blen); | 1026 | bcount, secs, (long)bcount * blen); |
1018 | return 0; | 1027 | return 0; |
1019 | } | 1028 | } |
1020 | 1029 | ||
@@ -1065,7 +1074,7 @@ out: | |||
1065 | return ret; | 1074 | return ret; |
1066 | } | 1075 | } |
1067 | 1076 | ||
1068 | static void test_acipher_speed(const char *algo, int enc, unsigned int sec, | 1077 | static void test_acipher_speed(const char *algo, int enc, unsigned int secs, |
1069 | struct cipher_speed_template *template, | 1078 | struct cipher_speed_template *template, |
1070 | unsigned int tcount, u8 *keysize) | 1079 | unsigned int tcount, u8 *keysize) |
1071 | { | 1080 | { |
@@ -1083,8 +1092,6 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec, | |||
1083 | else | 1092 | else |
1084 | e = "decryption"; | 1093 | e = "decryption"; |
1085 | 1094 | ||
1086 | pr_info("\ntesting speed of async %s %s\n", algo, e); | ||
1087 | |||
1088 | init_completion(&tresult.completion); | 1095 | init_completion(&tresult.completion); |
1089 | 1096 | ||
1090 | tfm = crypto_alloc_ablkcipher(algo, 0, 0); | 1097 | tfm = crypto_alloc_ablkcipher(algo, 0, 0); |
@@ -1095,6 +1102,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec, | |||
1095 | return; | 1102 | return; |
1096 | } | 1103 | } |
1097 | 1104 | ||
1105 | pr_info("\ntesting speed of async %s (%s) %s\n", algo, | ||
1106 | get_driver_name(crypto_ablkcipher, tfm), e); | ||
1107 | |||
1098 | req = ablkcipher_request_alloc(tfm, GFP_KERNEL); | 1108 | req = ablkcipher_request_alloc(tfm, GFP_KERNEL); |
1099 | if (!req) { | 1109 | if (!req) { |
1100 | pr_err("tcrypt: skcipher: Failed to allocate request for %s\n", | 1110 | pr_err("tcrypt: skcipher: Failed to allocate request for %s\n", |
@@ -1168,9 +1178,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec, | |||
1168 | 1178 | ||
1169 | ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv); | 1179 | ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv); |
1170 | 1180 | ||
1171 | if (sec) | 1181 | if (secs) |
1172 | ret = test_acipher_jiffies(req, enc, | 1182 | ret = test_acipher_jiffies(req, enc, |
1173 | *b_size, sec); | 1183 | *b_size, secs); |
1174 | else | 1184 | else |
1175 | ret = test_acipher_cycles(req, enc, | 1185 | ret = test_acipher_cycles(req, enc, |
1176 | *b_size); | 1186 | *b_size); |
@@ -1585,6 +1595,12 @@ static int do_test(int m) | |||
1585 | test_cipher_speed("cbc(des3_ede)", DECRYPT, sec, | 1595 | test_cipher_speed("cbc(des3_ede)", DECRYPT, sec, |
1586 | des3_speed_template, DES3_SPEED_VECTORS, | 1596 | des3_speed_template, DES3_SPEED_VECTORS, |
1587 | speed_template_24); | 1597 | speed_template_24); |
1598 | test_cipher_speed("ctr(des3_ede)", ENCRYPT, sec, | ||
1599 | des3_speed_template, DES3_SPEED_VECTORS, | ||
1600 | speed_template_24); | ||
1601 | test_cipher_speed("ctr(des3_ede)", DECRYPT, sec, | ||
1602 | des3_speed_template, DES3_SPEED_VECTORS, | ||
1603 | speed_template_24); | ||
1588 | break; | 1604 | break; |
1589 | 1605 | ||
1590 | case 202: | 1606 | case 202: |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 498649ac1953..ac2b63105afc 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/string.h> | 28 | #include <linux/string.h> |
29 | #include <crypto/rng.h> | 29 | #include <crypto/rng.h> |
30 | #include <crypto/drbg.h> | ||
30 | 31 | ||
31 | #include "internal.h" | 32 | #include "internal.h" |
32 | 33 | ||
@@ -108,6 +109,11 @@ struct cprng_test_suite { | |||
108 | unsigned int count; | 109 | unsigned int count; |
109 | }; | 110 | }; |
110 | 111 | ||
112 | struct drbg_test_suite { | ||
113 | struct drbg_testvec *vecs; | ||
114 | unsigned int count; | ||
115 | }; | ||
116 | |||
111 | struct alg_test_desc { | 117 | struct alg_test_desc { |
112 | const char *alg; | 118 | const char *alg; |
113 | int (*test)(const struct alg_test_desc *desc, const char *driver, | 119 | int (*test)(const struct alg_test_desc *desc, const char *driver, |
@@ -121,6 +127,7 @@ struct alg_test_desc { | |||
121 | struct pcomp_test_suite pcomp; | 127 | struct pcomp_test_suite pcomp; |
122 | struct hash_test_suite hash; | 128 | struct hash_test_suite hash; |
123 | struct cprng_test_suite cprng; | 129 | struct cprng_test_suite cprng; |
130 | struct drbg_test_suite drbg; | ||
124 | } suite; | 131 | } suite; |
125 | }; | 132 | }; |
126 | 133 | ||
@@ -191,13 +198,20 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
191 | const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); | 198 | const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); |
192 | unsigned int i, j, k, temp; | 199 | unsigned int i, j, k, temp; |
193 | struct scatterlist sg[8]; | 200 | struct scatterlist sg[8]; |
194 | char result[64]; | 201 | char *result; |
202 | char *key; | ||
195 | struct ahash_request *req; | 203 | struct ahash_request *req; |
196 | struct tcrypt_result tresult; | 204 | struct tcrypt_result tresult; |
197 | void *hash_buff; | 205 | void *hash_buff; |
198 | char *xbuf[XBUFSIZE]; | 206 | char *xbuf[XBUFSIZE]; |
199 | int ret = -ENOMEM; | 207 | int ret = -ENOMEM; |
200 | 208 | ||
209 | result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL); | ||
210 | if (!result) | ||
211 | return ret; | ||
212 | key = kmalloc(MAX_KEYLEN, GFP_KERNEL); | ||
213 | if (!key) | ||
214 | goto out_nobuf; | ||
201 | if (testmgr_alloc_buf(xbuf)) | 215 | if (testmgr_alloc_buf(xbuf)) |
202 | goto out_nobuf; | 216 | goto out_nobuf; |
203 | 217 | ||
@@ -222,7 +236,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
222 | goto out; | 236 | goto out; |
223 | 237 | ||
224 | j++; | 238 | j++; |
225 | memset(result, 0, 64); | 239 | memset(result, 0, MAX_DIGEST_SIZE); |
226 | 240 | ||
227 | hash_buff = xbuf[0]; | 241 | hash_buff = xbuf[0]; |
228 | hash_buff += align_offset; | 242 | hash_buff += align_offset; |
@@ -232,8 +246,14 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
232 | 246 | ||
233 | if (template[i].ksize) { | 247 | if (template[i].ksize) { |
234 | crypto_ahash_clear_flags(tfm, ~0); | 248 | crypto_ahash_clear_flags(tfm, ~0); |
235 | ret = crypto_ahash_setkey(tfm, template[i].key, | 249 | if (template[i].ksize > MAX_KEYLEN) { |
236 | template[i].ksize); | 250 | pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", |
251 | j, algo, template[i].ksize, MAX_KEYLEN); | ||
252 | ret = -EINVAL; | ||
253 | goto out; | ||
254 | } | ||
255 | memcpy(key, template[i].key, template[i].ksize); | ||
256 | ret = crypto_ahash_setkey(tfm, key, template[i].ksize); | ||
237 | if (ret) { | 257 | if (ret) { |
238 | printk(KERN_ERR "alg: hash: setkey failed on " | 258 | printk(KERN_ERR "alg: hash: setkey failed on " |
239 | "test %d for %s: ret=%d\n", j, algo, | 259 | "test %d for %s: ret=%d\n", j, algo, |
@@ -293,7 +313,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
293 | 313 | ||
294 | if (template[i].np) { | 314 | if (template[i].np) { |
295 | j++; | 315 | j++; |
296 | memset(result, 0, 64); | 316 | memset(result, 0, MAX_DIGEST_SIZE); |
297 | 317 | ||
298 | temp = 0; | 318 | temp = 0; |
299 | sg_init_table(sg, template[i].np); | 319 | sg_init_table(sg, template[i].np); |
@@ -312,8 +332,16 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
312 | } | 332 | } |
313 | 333 | ||
314 | if (template[i].ksize) { | 334 | if (template[i].ksize) { |
335 | if (template[i].ksize > MAX_KEYLEN) { | ||
336 | pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", | ||
337 | j, algo, template[i].ksize, | ||
338 | MAX_KEYLEN); | ||
339 | ret = -EINVAL; | ||
340 | goto out; | ||
341 | } | ||
315 | crypto_ahash_clear_flags(tfm, ~0); | 342 | crypto_ahash_clear_flags(tfm, ~0); |
316 | ret = crypto_ahash_setkey(tfm, template[i].key, | 343 | memcpy(key, template[i].key, template[i].ksize); |
344 | ret = crypto_ahash_setkey(tfm, key, | ||
317 | template[i].ksize); | 345 | template[i].ksize); |
318 | 346 | ||
319 | if (ret) { | 347 | if (ret) { |
@@ -365,6 +393,8 @@ out: | |||
365 | out_noreq: | 393 | out_noreq: |
366 | testmgr_free_buf(xbuf); | 394 | testmgr_free_buf(xbuf); |
367 | out_nobuf: | 395 | out_nobuf: |
396 | kfree(key); | ||
397 | kfree(result); | ||
368 | return ret; | 398 | return ret; |
369 | } | 399 | } |
370 | 400 | ||
@@ -422,6 +452,9 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
422 | iv = kzalloc(MAX_IVLEN, GFP_KERNEL); | 452 | iv = kzalloc(MAX_IVLEN, GFP_KERNEL); |
423 | if (!iv) | 453 | if (!iv) |
424 | return ret; | 454 | return ret; |
455 | key = kmalloc(MAX_KEYLEN, GFP_KERNEL); | ||
456 | if (!key) | ||
457 | goto out_noxbuf; | ||
425 | if (testmgr_alloc_buf(xbuf)) | 458 | if (testmgr_alloc_buf(xbuf)) |
426 | goto out_noxbuf; | 459 | goto out_noxbuf; |
427 | if (testmgr_alloc_buf(axbuf)) | 460 | if (testmgr_alloc_buf(axbuf)) |
@@ -486,7 +519,14 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
486 | crypto_aead_set_flags( | 519 | crypto_aead_set_flags( |
487 | tfm, CRYPTO_TFM_REQ_WEAK_KEY); | 520 | tfm, CRYPTO_TFM_REQ_WEAK_KEY); |
488 | 521 | ||
489 | key = template[i].key; | 522 | if (template[i].klen > MAX_KEYLEN) { |
523 | pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", | ||
524 | d, j, algo, template[i].klen, | ||
525 | MAX_KEYLEN); | ||
526 | ret = -EINVAL; | ||
527 | goto out; | ||
528 | } | ||
529 | memcpy(key, template[i].key, template[i].klen); | ||
490 | 530 | ||
491 | ret = crypto_aead_setkey(tfm, key, | 531 | ret = crypto_aead_setkey(tfm, key, |
492 | template[i].klen); | 532 | template[i].klen); |
@@ -587,7 +627,14 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
587 | if (template[i].wk) | 627 | if (template[i].wk) |
588 | crypto_aead_set_flags( | 628 | crypto_aead_set_flags( |
589 | tfm, CRYPTO_TFM_REQ_WEAK_KEY); | 629 | tfm, CRYPTO_TFM_REQ_WEAK_KEY); |
590 | key = template[i].key; | 630 | if (template[i].klen > MAX_KEYLEN) { |
631 | pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", | ||
632 | d, j, algo, template[i].klen, | ||
633 | MAX_KEYLEN); | ||
634 | ret = -EINVAL; | ||
635 | goto out; | ||
636 | } | ||
637 | memcpy(key, template[i].key, template[i].klen); | ||
591 | 638 | ||
592 | ret = crypto_aead_setkey(tfm, key, template[i].klen); | 639 | ret = crypto_aead_setkey(tfm, key, template[i].klen); |
593 | if (!ret == template[i].fail) { | 640 | if (!ret == template[i].fail) { |
@@ -769,6 +816,7 @@ out_nooutbuf: | |||
769 | out_noaxbuf: | 816 | out_noaxbuf: |
770 | testmgr_free_buf(xbuf); | 817 | testmgr_free_buf(xbuf); |
771 | out_noxbuf: | 818 | out_noxbuf: |
819 | kfree(key); | ||
772 | kfree(iv); | 820 | kfree(iv); |
773 | return ret; | 821 | return ret; |
774 | } | 822 | } |
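One context line in the hunks above deserves a close read: "if (!ret == template[i].fail)". It is valid C and does what the test needs, but only because of operator precedence:

/* Because ! binds tighter than ==, the line parses as
 *
 *      if ((!ret) == template[i].fail) { ... }
 *
 * i.e. the error branch is taken exactly on a mismatch: setkey
 * succeeded on a vector marked .fail = 1, or setkey failed on a
 * vector that should have been accepted.
 */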
@@ -1715,6 +1763,100 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, | |||
1715 | return err; | 1763 | return err; |
1716 | } | 1764 | } |
1717 | 1765 | ||
1766 | |||
1767 | static int drbg_cavs_test(struct drbg_testvec *test, int pr, | ||
1768 | const char *driver, u32 type, u32 mask) | ||
1769 | { | ||
1770 | int ret = -EAGAIN; | ||
1771 | struct crypto_rng *drng; | ||
1772 | struct drbg_test_data test_data; | ||
1773 | struct drbg_string addtl, pers, testentropy; | ||
1774 | unsigned char *buf = kzalloc(test->expectedlen, GFP_KERNEL); | ||
1775 | |||
1776 | if (!buf) | ||
1777 | return -ENOMEM; | ||
1778 | |||
1779 | drng = crypto_alloc_rng(driver, type, mask); | ||
1780 | if (IS_ERR(drng)) { | ||
1781 | printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " | ||
1782 | "%s\n", driver); | ||
1783 | kzfree(buf); | ||
1784 | return -ENOMEM; | ||
1785 | } | ||
1786 | |||
1787 | test_data.testentropy = &testentropy; | ||
1788 | drbg_string_fill(&testentropy, test->entropy, test->entropylen); | ||
1789 | drbg_string_fill(&pers, test->pers, test->perslen); | ||
1790 | ret = crypto_drbg_reset_test(drng, &pers, &test_data); | ||
1791 | if (ret) { | ||
1792 | printk(KERN_ERR "alg: drbg: Failed to reset rng\n"); | ||
1793 | goto outbuf; | ||
1794 | } | ||
1795 | |||
1796 | drbg_string_fill(&addtl, test->addtla, test->addtllen); | ||
1797 | if (pr) { | ||
1798 | drbg_string_fill(&testentropy, test->entpra, test->entprlen); | ||
1799 | ret = crypto_drbg_get_bytes_addtl_test(drng, | ||
1800 | buf, test->expectedlen, &addtl, &test_data); | ||
1801 | } else { | ||
1802 | ret = crypto_drbg_get_bytes_addtl(drng, | ||
1803 | buf, test->expectedlen, &addtl); | ||
1804 | } | ||
1805 | if (ret <= 0) { | ||
1806 | printk(KERN_ERR "alg: drbg: could not obtain random data for " | ||
1807 | "driver %s\n", driver); | ||
1808 | goto outbuf; | ||
1809 | } | ||
1810 | |||
1811 | drbg_string_fill(&addtl, test->addtlb, test->addtllen); | ||
1812 | if (pr) { | ||
1813 | drbg_string_fill(&testentropy, test->entprb, test->entprlen); | ||
1814 | ret = crypto_drbg_get_bytes_addtl_test(drng, | ||
1815 | buf, test->expectedlen, &addtl, &test_data); | ||
1816 | } else { | ||
1817 | ret = crypto_drbg_get_bytes_addtl(drng, | ||
1818 | buf, test->expectedlen, &addtl); | ||
1819 | } | ||
1820 | if (ret <= 0) { | ||
1821 | printk(KERN_ERR "alg: drbg: could not obtain random data for " | ||
1822 | "driver %s\n", driver); | ||
1823 | goto outbuf; | ||
1824 | } | ||
1825 | |||
1826 | ret = memcmp(test->expected, buf, test->expectedlen); | ||
1827 | |||
1828 | outbuf: | ||
1829 | crypto_free_rng(drng); | ||
1830 | kzfree(buf); | ||
1831 | return ret; | ||
1832 | } | ||
1833 | |||
1834 | |||
1835 | static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver, | ||
1836 | u32 type, u32 mask) | ||
1837 | { | ||
1838 | int err = 0; | ||
1839 | int pr = 0; | ||
1840 | int i = 0; | ||
1841 | struct drbg_testvec *template = desc->suite.drbg.vecs; | ||
1842 | unsigned int tcount = desc->suite.drbg.count; | ||
1843 | |||
1844 | if (0 == memcmp(driver, "drbg_pr_", 8)) | ||
1845 | pr = 1; | ||
1846 | |||
1847 | for (i = 0; i < tcount; i++) { | ||
1848 | err = drbg_cavs_test(&template[i], pr, driver, type, mask); | ||
1849 | if (err) { | ||
1850 | printk(KERN_ERR "alg: drbg: Test %d failed for %s\n", | ||
1851 | i, driver); | ||
1852 | err = -EINVAL; | ||
1853 | break; | ||
1854 | } | ||
1855 | } | ||
1856 | return err; | ||
1857 | |||
1858 | } | ||
1859 | |||
1718 | static int alg_test_null(const struct alg_test_desc *desc, | 1860 | static int alg_test_null(const struct alg_test_desc *desc, |
1719 | const char *driver, u32 type, u32 mask) | 1861 | const char *driver, u32 type, u32 mask) |
1720 | { | 1862 | { |
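The two functions added above implement the SP800-90A CAVS flow: drbg_cavs_test() instantiates the DRBG with the vector's entropy and optional personalization string via crypto_drbg_reset_test(), runs two generate operations (routing per-request entropy through the *_test helpers when prediction resistance is enabled, and passing the additional-input strings either way), then compares the second output block against the expected bytes. alg_test_drbg() simply dispatches on the "drbg_pr_" driver-name prefix to pick the prediction-resistance path. Outside the test harness, the same DRBGs are consumed through the ordinary kernel RNG API; a minimal sketch, assuming the drbg backend accepts a NULL seed as an empty personalization string (error handling trimmed):

#include <linux/err.h>
#include <crypto/rng.h>

static int get_random_block(u8 *out, unsigned int len)
{
        struct crypto_rng *rng;
        int ret;

        rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        /* seed from the kernel entropy pool; NULL = no personalization */
        ret = crypto_rng_reset(rng, NULL, 0);
        if (!ret)
                ret = crypto_rng_get_bytes(rng, out, len);

        crypto_free_rng(rng);
        return ret;
}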
@@ -2458,6 +2600,152 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2458 | .alg = "digest_null", | 2600 | .alg = "digest_null", |
2459 | .test = alg_test_null, | 2601 | .test = alg_test_null, |
2460 | }, { | 2602 | }, { |
2603 | .alg = "drbg_nopr_ctr_aes128", | ||
2604 | .test = alg_test_drbg, | ||
2605 | .fips_allowed = 1, | ||
2606 | .suite = { | ||
2607 | .drbg = { | ||
2608 | .vecs = drbg_nopr_ctr_aes128_tv_template, | ||
2609 | .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template) | ||
2610 | } | ||
2611 | } | ||
2612 | }, { | ||
2613 | .alg = "drbg_nopr_ctr_aes192", | ||
2614 | .test = alg_test_drbg, | ||
2615 | .fips_allowed = 1, | ||
2616 | .suite = { | ||
2617 | .drbg = { | ||
2618 | .vecs = drbg_nopr_ctr_aes192_tv_template, | ||
2619 | .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template) | ||
2620 | } | ||
2621 | } | ||
2622 | }, { | ||
2623 | .alg = "drbg_nopr_ctr_aes256", | ||
2624 | .test = alg_test_drbg, | ||
2625 | .fips_allowed = 1, | ||
2626 | .suite = { | ||
2627 | .drbg = { | ||
2628 | .vecs = drbg_nopr_ctr_aes256_tv_template, | ||
2629 | .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template) | ||
2630 | } | ||
2631 | } | ||
2632 | }, { | ||
2633 | /* | ||
2634 | * There is no need to specifically test the DRBG with every | ||
2635 | * backend cipher -- covered by drbg_nopr_hmac_sha256 test | ||
2636 | */ | ||
2637 | .alg = "drbg_nopr_hmac_sha1", | ||
2638 | .fips_allowed = 1, | ||
2639 | .test = alg_test_null, | ||
2640 | }, { | ||
2641 | .alg = "drbg_nopr_hmac_sha256", | ||
2642 | .test = alg_test_drbg, | ||
2643 | .fips_allowed = 1, | ||
2644 | .suite = { | ||
2645 | .drbg = { | ||
2646 | .vecs = drbg_nopr_hmac_sha256_tv_template, | ||
2647 | .count = | ||
2648 | ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template) | ||
2649 | } | ||
2650 | } | ||
2651 | }, { | ||
2652 | /* covered by drbg_nopr_hmac_sha256 test */ | ||
2653 | .alg = "drbg_nopr_hmac_sha384", | ||
2654 | .fips_allowed = 1, | ||
2655 | .test = alg_test_null, | ||
2656 | }, { | ||
2657 | .alg = "drbg_nopr_hmac_sha512", | ||
2658 | .test = alg_test_null, | ||
2659 | .fips_allowed = 1, | ||
2660 | }, { | ||
2661 | .alg = "drbg_nopr_sha1", | ||
2662 | .fips_allowed = 1, | ||
2663 | .test = alg_test_null, | ||
2664 | }, { | ||
2665 | .alg = "drbg_nopr_sha256", | ||
2666 | .test = alg_test_drbg, | ||
2667 | .fips_allowed = 1, | ||
2668 | .suite = { | ||
2669 | .drbg = { | ||
2670 | .vecs = drbg_nopr_sha256_tv_template, | ||
2671 | .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template) | ||
2672 | } | ||
2673 | } | ||
2674 | }, { | ||
2675 | /* covered by drbg_nopr_sha256 test */ | ||
2676 | .alg = "drbg_nopr_sha384", | ||
2677 | .fips_allowed = 1, | ||
2678 | .test = alg_test_null, | ||
2679 | }, { | ||
2680 | .alg = "drbg_nopr_sha512", | ||
2681 | .fips_allowed = 1, | ||
2682 | .test = alg_test_null, | ||
2683 | }, { | ||
2684 | .alg = "drbg_pr_ctr_aes128", | ||
2685 | .test = alg_test_drbg, | ||
2686 | .fips_allowed = 1, | ||
2687 | .suite = { | ||
2688 | .drbg = { | ||
2689 | .vecs = drbg_pr_ctr_aes128_tv_template, | ||
2690 | .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template) | ||
2691 | } | ||
2692 | } | ||
2693 | }, { | ||
2694 | /* covered by drbg_pr_ctr_aes128 test */ | ||
2695 | .alg = "drbg_pr_ctr_aes192", | ||
2696 | .fips_allowed = 1, | ||
2697 | .test = alg_test_null, | ||
2698 | }, { | ||
2699 | .alg = "drbg_pr_ctr_aes256", | ||
2700 | .fips_allowed = 1, | ||
2701 | .test = alg_test_null, | ||
2702 | }, { | ||
2703 | .alg = "drbg_pr_hmac_sha1", | ||
2704 | .fips_allowed = 1, | ||
2705 | .test = alg_test_null, | ||
2706 | }, { | ||
2707 | .alg = "drbg_pr_hmac_sha256", | ||
2708 | .test = alg_test_drbg, | ||
2709 | .fips_allowed = 1, | ||
2710 | .suite = { | ||
2711 | .drbg = { | ||
2712 | .vecs = drbg_pr_hmac_sha256_tv_template, | ||
2713 | .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template) | ||
2714 | } | ||
2715 | } | ||
2716 | }, { | ||
2717 | /* covered by drbg_pr_hmac_sha256 test */ | ||
2718 | .alg = "drbg_pr_hmac_sha384", | ||
2719 | .fips_allowed = 1, | ||
2720 | .test = alg_test_null, | ||
2721 | }, { | ||
2722 | .alg = "drbg_pr_hmac_sha512", | ||
2723 | .test = alg_test_null, | ||
2724 | .fips_allowed = 1, | ||
2725 | }, { | ||
2726 | .alg = "drbg_pr_sha1", | ||
2727 | .fips_allowed = 1, | ||
2728 | .test = alg_test_null, | ||
2729 | }, { | ||
2730 | .alg = "drbg_pr_sha256", | ||
2731 | .test = alg_test_drbg, | ||
2732 | .fips_allowed = 1, | ||
2733 | .suite = { | ||
2734 | .drbg = { | ||
2735 | .vecs = drbg_pr_sha256_tv_template, | ||
2736 | .count = ARRAY_SIZE(drbg_pr_sha256_tv_template) | ||
2737 | } | ||
2738 | } | ||
2739 | }, { | ||
2740 | /* covered by drbg_pr_sha256 test */ | ||
2741 | .alg = "drbg_pr_sha384", | ||
2742 | .fips_allowed = 1, | ||
2743 | .test = alg_test_null, | ||
2744 | }, { | ||
2745 | .alg = "drbg_pr_sha512", | ||
2746 | .fips_allowed = 1, | ||
2747 | .test = alg_test_null, | ||
2748 | }, { | ||
2461 | .alg = "ecb(__aes-aesni)", | 2749 | .alg = "ecb(__aes-aesni)", |
2462 | .test = alg_test_null, | 2750 | .test = alg_test_null, |
2463 | .fips_allowed = 1, | 2751 | .fips_allowed = 1, |
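testmgr locates entries with a binary search over alg_test_descs, so the new drbg_* block must keep the array alphabetically sorted; it slots in between "digest_null" and "ecb(__aes-aesni)". Variants wired to alg_test_null still carry .fips_allowed, so they self-test trivially in FIPS mode while their shared code path is exercised by a sibling entry, as the inline comments note. The lookup, in simplified self-contained form (a sketch, not the kernel's exact code):

#include <string.h>

/* binary search over a sorted table of algorithm names */
static int alg_find_test(const char *alg, const char *const names[], int count)
{
        int lo = 0, hi = count;

        while (lo < hi) {
                int mid = lo + (hi - lo) / 2;
                int diff = strcmp(names[mid], alg);

                if (diff < 0)
                        lo = mid + 1;   /* names[mid] sorts before alg */
                else if (diff > 0)
                        hi = mid;       /* names[mid] sorts after alg */
                else
                        return mid;
        }
        return -1;                      /* no test registered for alg */
}

int main(void)
{
        static const char *const names[] = {
                "cbc(aes)", "drbg_pr_sha256", "ecb(aes)"
        };
        return alg_find_test("drbg_pr_sha256", names, 3) == 1 ? 0 : 1;
}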
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 69d0dd8ef27e..6597203eccfa 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #define MAX_DIGEST_SIZE 64 | 32 | #define MAX_DIGEST_SIZE 64 |
33 | #define MAX_TAP 8 | 33 | #define MAX_TAP 8 |
34 | 34 | ||
35 | #define MAX_KEYLEN 56 | 35 | #define MAX_KEYLEN 160 |
36 | #define MAX_IVLEN 32 | 36 | #define MAX_IVLEN 32 |
37 | 37 | ||
38 | struct hash_testvec { | 38 | struct hash_testvec { |
@@ -92,6 +92,21 @@ struct cprng_testvec { | |||
92 | unsigned short loops; | 92 | unsigned short loops; |
93 | }; | 93 | }; |
94 | 94 | ||
95 | struct drbg_testvec { | ||
96 | unsigned char *entropy; | ||
97 | size_t entropylen; | ||
98 | unsigned char *entpra; | ||
99 | unsigned char *entprb; | ||
100 | size_t entprlen; | ||
101 | unsigned char *addtla; | ||
102 | unsigned char *addtlb; | ||
103 | size_t addtllen; | ||
104 | unsigned char *pers; | ||
105 | size_t perslen; | ||
106 | unsigned char *expected; | ||
107 | size_t expectedlen; | ||
108 | }; | ||
109 | |||
95 | static char zeroed_string[48]; | 110 | static char zeroed_string[48]; |
96 | 111 | ||
97 | /* | 112 | /* |
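struct drbg_testvec mirrors the call sequence of drbg_cavs_test() in testmgr.c one field at a time; written out (an illustrative note, not part of the patch):

/*
 * crypto_drbg_reset_test(drng, pers, entropy)  <-  .pers/.perslen,
 *                                                  .entropy/.entropylen
 * 1st generate: .addtla as additional input,
 *               .entpra as per-request entropy when prediction
 *               resistance is on
 * 2nd generate: .addtlb / .entprb, same roles
 * memcmp(.expected, buf, .expectedlen)         <-  checks the 2nd output
 */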
@@ -1807,18 +1822,59 @@ static struct hash_testvec tgr128_tv_template[] = { | |||
1807 | }, | 1822 | }, |
1808 | }; | 1823 | }; |
1809 | 1824 | ||
1810 | #define GHASH_TEST_VECTORS 1 | 1825 | #define GHASH_TEST_VECTORS 5 |
1811 | 1826 | ||
1812 | static struct hash_testvec ghash_tv_template[] = | 1827 | static struct hash_testvec ghash_tv_template[] = |
1813 | { | 1828 | { |
1814 | { | 1829 | { |
1815 | 1830 | .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03" | |
1816 | .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61", | 1831 | "\xff\xca\xff\x95\xf8\x30\xf0\x61", |
1817 | .ksize = 16, | 1832 | .ksize = 16, |
1818 | .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6", | 1833 | .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0" |
1834 | "\xb3\x2b\x66\x56\xa0\x5b\x40\xb6", | ||
1819 | .psize = 16, | 1835 | .psize = 16, |
1820 | .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6" | 1836 | .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6" |
1821 | "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60", | 1837 | "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60", |
1838 | }, { | ||
1839 | .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" | ||
1840 | "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", | ||
1841 | .ksize = 16, | ||
1842 | .plaintext = "what do ya want for nothing?", | ||
1843 | .psize = 28, | ||
1844 | .digest = "\x3e\x1f\x5c\x4d\x65\xf0\xef\xce" | ||
1845 | "\x0d\x61\x06\x27\x66\x51\xd5\xe2", | ||
1846 | .np = 2, | ||
1847 | .tap = {14, 14} | ||
1848 | }, { | ||
1849 | .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" | ||
1850 | "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", | ||
1851 | .ksize = 16, | ||
1852 | .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" | ||
1853 | "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" | ||
1854 | "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" | ||
1855 | "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd", | ||
1856 | .psize = 50, | ||
1857 | .digest = "\xfb\x49\x8a\x36\xe1\x96\xe1\x96" | ||
1858 | "\xe1\x96\xe1\x96\xe1\x96\xe1\x96", | ||
1859 | }, { | ||
1860 | .key = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6" | ||
1861 | "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60", | ||
1862 | .ksize = 16, | ||
1863 | .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" | ||
1864 | "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" | ||
1865 | "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" | ||
1866 | "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd", | ||
1867 | .psize = 50, | ||
1868 | .digest = "\x2b\x5c\x0c\x7f\x52\xd1\x60\xc2" | ||
1869 | "\x49\xed\x6e\x32\x7a\xa9\xbe\x08", | ||
1870 | }, { | ||
1871 | .key = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0" | ||
1872 | "\xb3\x2b\x66\x56\xa0\x5b\x40\xb6", | ||
1873 | .ksize = 16, | ||
1874 | .plaintext = "Test With Truncation", | ||
1875 | .psize = 20, | ||
1876 | .digest = "\xf8\x94\x87\x2a\x4b\x63\x99\x28" | ||
1877 | "\x23\xf7\x93\xf7\x19\xf5\x96\xd9", | ||
1822 | }, | 1878 | }, |
1823 | }; | 1879 | }; |
1824 | 1880 | ||
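The four added GHASH vectors reuse familiar inputs: the 0x0b and 0xaa keys, "what do ya want for nothing?", the fifty 0xdd bytes and "Test With Truncation" match the classic RFC 2202 HMAC test cases, with digests recomputed for GHASH. The second vector's .np = 2, .tap = {14, 14} splits its 28-byte message across two scatterlist segments. (In the first vector's plaintext, the pre-existing "\x04a" escape is a single byte: the C lexer folds the hex digits 0, 4, a into 0x4a, so psize = 16 stays correct.) How __test_hash consumes .np/.tap, abridged from its chunking loop (context not shown in these hunks):

/* abridged sketch of the __test_hash chunking loop */
sg_init_table(sg, template[i].np);
for (k = 0, temp = 0; k < template[i].np; k++) {
        sg_set_buf(&sg[k],
                   memcpy(xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]),
                          template[i].plaintext + temp, template[i].tap[k]),
                   template[i].tap[k]);
        temp += template[i].tap[k];
}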
@@ -3097,8 +3153,8 @@ static struct cipher_testvec des_enc_tv_template[] = { | |||
3097 | "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A", | 3153 | "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A", |
3098 | .rlen = 248, | 3154 | .rlen = 248, |
3099 | .also_non_np = 1, | 3155 | .also_non_np = 1, |
3100 | .np = 2, | 3156 | .np = 3, |
3101 | .tap = { 248 - 8, 8 }, | 3157 | .tap = { 248 - 10, 2, 8 }, |
3102 | }, | 3158 | }, |
3103 | }; | 3159 | }; |
3104 | 3160 | ||
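This hunk, and the long run of identical ones below, widens each template's scatterlist split from two segments to three, wedging a chunk smaller than the cipher's block size into the middle: 2 bytes for the 8-byte-block ciphers (DES, 3DES, Blowfish) and 4 bytes for the 16-byte-block ones (AES, Twofish, Serpent, CAST6). That forces implementations to gather a block across segment boundaries instead of always seeing block-aligned buffers. A self-contained check that a tap pattern still covers the whole template:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        const unsigned int rlen = 248;
        const unsigned int tap[] = { 248 - 10, 2, 8 };  /* new DES split */
        unsigned int off = 0;

        for (unsigned int k = 0; k < 3; k++) {
                printf("segment %u: bytes [%u, %u)\n", k, off, off + tap[k]);
                off += tap[k];
        }
        assert(off == rlen);    /* taps must sum to .rlen exactly */
        return 0;
}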
@@ -3207,8 +3263,8 @@ static struct cipher_testvec des_dec_tv_template[] = { | |||
3207 | "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB", | 3263 | "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB", |
3208 | .rlen = 248, | 3264 | .rlen = 248, |
3209 | .also_non_np = 1, | 3265 | .also_non_np = 1, |
3210 | .np = 2, | 3266 | .np = 3, |
3211 | .tap = { 248 - 8, 8 }, | 3267 | .tap = { 248 - 10, 2, 8 }, |
3212 | }, | 3268 | }, |
3213 | }; | 3269 | }; |
3214 | 3270 | ||
@@ -3333,8 +3389,8 @@ static struct cipher_testvec des_cbc_enc_tv_template[] = { | |||
3333 | "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63", | 3389 | "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63", |
3334 | .rlen = 248, | 3390 | .rlen = 248, |
3335 | .also_non_np = 1, | 3391 | .also_non_np = 1, |
3336 | .np = 2, | 3392 | .np = 3, |
3337 | .tap = { 248 - 8, 8 }, | 3393 | .tap = { 248 - 10, 2, 8 }, |
3338 | }, | 3394 | }, |
3339 | }; | 3395 | }; |
3340 | 3396 | ||
@@ -3442,8 +3498,8 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = { | |||
3442 | "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB", | 3498 | "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB", |
3443 | .rlen = 248, | 3499 | .rlen = 248, |
3444 | .also_non_np = 1, | 3500 | .also_non_np = 1, |
3445 | .np = 2, | 3501 | .np = 3, |
3446 | .tap = { 248 - 8, 8 }, | 3502 | .tap = { 248 - 10, 2, 8 }, |
3447 | }, | 3503 | }, |
3448 | }; | 3504 | }; |
3449 | 3505 | ||
@@ -3517,8 +3573,8 @@ static struct cipher_testvec des_ctr_enc_tv_template[] = { | |||
3517 | "\x69\x74\xA1\x06\x46\x0F\x4E\x75", | 3573 | "\x69\x74\xA1\x06\x46\x0F\x4E\x75", |
3518 | .rlen = 248, | 3574 | .rlen = 248, |
3519 | .also_non_np = 1, | 3575 | .also_non_np = 1, |
3520 | .np = 2, | 3576 | .np = 3, |
3521 | .tap = { 248 - 8, 8 }, | 3577 | .tap = { 248 - 10, 2, 8 }, |
3522 | }, { /* Generated with Crypto++ */ | 3578 | }, { /* Generated with Crypto++ */ |
3523 | .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", | 3579 | .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", |
3524 | .klen = 8, | 3580 | .klen = 8, |
@@ -3663,8 +3719,8 @@ static struct cipher_testvec des_ctr_dec_tv_template[] = { | |||
3663 | "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB", | 3719 | "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB", |
3664 | .rlen = 248, | 3720 | .rlen = 248, |
3665 | .also_non_np = 1, | 3721 | .also_non_np = 1, |
3666 | .np = 2, | 3722 | .np = 3, |
3667 | .tap = { 248 - 8, 8 }, | 3723 | .tap = { 248 - 10, 2, 8 }, |
3668 | }, { /* Generated with Crypto++ */ | 3724 | }, { /* Generated with Crypto++ */ |
3669 | .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", | 3725 | .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", |
3670 | .klen = 8, | 3726 | .klen = 8, |
@@ -3899,8 +3955,8 @@ static struct cipher_testvec des3_ede_enc_tv_template[] = { | |||
3899 | "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63", | 3955 | "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63", |
3900 | .rlen = 496, | 3956 | .rlen = 496, |
3901 | .also_non_np = 1, | 3957 | .also_non_np = 1, |
3902 | .np = 2, | 3958 | .np = 3, |
3903 | .tap = { 496 - 16, 16 }, | 3959 | .tap = { 496 - 20, 4, 16 }, |
3904 | }, | 3960 | }, |
3905 | }; | 3961 | }; |
3906 | 3962 | ||
@@ -4064,8 +4120,8 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = { | |||
4064 | "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47", | 4120 | "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47", |
4065 | .rlen = 496, | 4121 | .rlen = 496, |
4066 | .also_non_np = 1, | 4122 | .also_non_np = 1, |
4067 | .np = 2, | 4123 | .np = 3, |
4068 | .tap = { 496 - 16, 16 }, | 4124 | .tap = { 496 - 20, 4, 16 }, |
4069 | }, | 4125 | }, |
4070 | }; | 4126 | }; |
4071 | 4127 | ||
@@ -4244,8 +4300,8 @@ static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = { | |||
4244 | "\x95\x63\x73\xA2\x44\xAC\xF8\xA5", | 4300 | "\x95\x63\x73\xA2\x44\xAC\xF8\xA5", |
4245 | .rlen = 496, | 4301 | .rlen = 496, |
4246 | .also_non_np = 1, | 4302 | .also_non_np = 1, |
4247 | .np = 2, | 4303 | .np = 3, |
4248 | .tap = { 496 - 16, 16 }, | 4304 | .tap = { 496 - 20, 4, 16 }, |
4249 | }, | 4305 | }, |
4250 | }; | 4306 | }; |
4251 | 4307 | ||
@@ -4424,8 +4480,8 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = { | |||
4424 | "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47", | 4480 | "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47", |
4425 | .rlen = 496, | 4481 | .rlen = 496, |
4426 | .also_non_np = 1, | 4482 | .also_non_np = 1, |
4427 | .np = 2, | 4483 | .np = 3, |
4428 | .tap = { 496 - 16, 16 }, | 4484 | .tap = { 496 - 20, 4, 16 }, |
4429 | }, | 4485 | }, |
4430 | }; | 4486 | }; |
4431 | 4487 | ||
@@ -4564,8 +4620,8 @@ static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = { | |||
4564 | "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34", | 4620 | "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34", |
4565 | .rlen = 496, | 4621 | .rlen = 496, |
4566 | .also_non_np = 1, | 4622 | .also_non_np = 1, |
4567 | .np = 2, | 4623 | .np = 3, |
4568 | .tap = { 496 - 16, 16 }, | 4624 | .tap = { 496 - 20, 4, 16 }, |
4569 | }, { /* Generated with Crypto++ */ | 4625 | }, { /* Generated with Crypto++ */ |
4570 | .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00" | 4626 | .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00" |
4571 | "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" | 4627 | "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" |
@@ -4842,8 +4898,8 @@ static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = { | |||
4842 | "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47", | 4898 | "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47", |
4843 | .rlen = 496, | 4899 | .rlen = 496, |
4844 | .also_non_np = 1, | 4900 | .also_non_np = 1, |
4845 | .np = 2, | 4901 | .np = 3, |
4846 | .tap = { 496 - 16, 16 }, | 4902 | .tap = { 496 - 20, 4, 16 }, |
4847 | }, { /* Generated with Crypto++ */ | 4903 | }, { /* Generated with Crypto++ */ |
4848 | .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00" | 4904 | .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00" |
4849 | "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" | 4905 | "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" |
@@ -5182,8 +5238,8 @@ static struct cipher_testvec bf_enc_tv_template[] = { | |||
5182 | "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4", | 5238 | "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4", |
5183 | .rlen = 504, | 5239 | .rlen = 504, |
5184 | .also_non_np = 1, | 5240 | .also_non_np = 1, |
5185 | .np = 2, | 5241 | .np = 3, |
5186 | .tap = { 504 - 8, 8 }, | 5242 | .tap = { 504 - 10, 2, 8 }, |
5187 | }, | 5243 | }, |
5188 | }; | 5244 | }; |
5189 | 5245 | ||
@@ -5374,8 +5430,8 @@ static struct cipher_testvec bf_dec_tv_template[] = { | |||
5374 | "\x2B\xC2\x59\xF0\x64\xFB\x92\x06", | 5430 | "\x2B\xC2\x59\xF0\x64\xFB\x92\x06", |
5375 | .rlen = 504, | 5431 | .rlen = 504, |
5376 | .also_non_np = 1, | 5432 | .also_non_np = 1, |
5377 | .np = 2, | 5433 | .np = 3, |
5378 | .tap = { 504 - 8, 8 }, | 5434 | .tap = { 504 - 10, 2, 8 }, |
5379 | }, | 5435 | }, |
5380 | }; | 5436 | }; |
5381 | 5437 | ||
@@ -5531,8 +5587,8 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = { | |||
5531 | "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4", | 5587 | "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4", |
5532 | .rlen = 504, | 5588 | .rlen = 504, |
5533 | .also_non_np = 1, | 5589 | .also_non_np = 1, |
5534 | .np = 2, | 5590 | .np = 3, |
5535 | .tap = { 504 - 8, 8 }, | 5591 | .tap = { 504 - 10, 2, 8 }, |
5536 | }, | 5592 | }, |
5537 | }; | 5593 | }; |
5538 | 5594 | ||
@@ -5688,8 +5744,8 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = { | |||
5688 | "\x2B\xC2\x59\xF0\x64\xFB\x92\x06", | 5744 | "\x2B\xC2\x59\xF0\x64\xFB\x92\x06", |
5689 | .rlen = 504, | 5745 | .rlen = 504, |
5690 | .also_non_np = 1, | 5746 | .also_non_np = 1, |
5691 | .np = 2, | 5747 | .np = 3, |
5692 | .tap = { 504 - 8, 8 }, | 5748 | .tap = { 504 - 10, 2, 8 }, |
5693 | }, | 5749 | }, |
5694 | }; | 5750 | }; |
5695 | 5751 | ||
@@ -6694,8 +6750,8 @@ static struct cipher_testvec tf_enc_tv_template[] = { | |||
6694 | "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5", | 6750 | "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5", |
6695 | .rlen = 496, | 6751 | .rlen = 496, |
6696 | .also_non_np = 1, | 6752 | .also_non_np = 1, |
6697 | .np = 2, | 6753 | .np = 3, |
6698 | .tap = { 496 - 16, 16 }, | 6754 | .tap = { 496 - 20, 4, 16 }, |
6699 | }, | 6755 | }, |
6700 | }; | 6756 | }; |
6701 | 6757 | ||
@@ -6862,8 +6918,8 @@ static struct cipher_testvec tf_dec_tv_template[] = { | |||
6862 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 6918 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
6863 | .rlen = 496, | 6919 | .rlen = 496, |
6864 | .also_non_np = 1, | 6920 | .also_non_np = 1, |
6865 | .np = 2, | 6921 | .np = 3, |
6866 | .tap = { 496 - 16, 16 }, | 6922 | .tap = { 496 - 20, 4, 16 }, |
6867 | }, | 6923 | }, |
6868 | }; | 6924 | }; |
6869 | 6925 | ||
@@ -7045,8 +7101,8 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = { | |||
7045 | "\x0A\xA3\x30\x10\x26\x25\x41\x2C", | 7101 | "\x0A\xA3\x30\x10\x26\x25\x41\x2C", |
7046 | .rlen = 496, | 7102 | .rlen = 496, |
7047 | .also_non_np = 1, | 7103 | .also_non_np = 1, |
7048 | .np = 2, | 7104 | .np = 3, |
7049 | .tap = { 496 - 16, 16 }, | 7105 | .tap = { 496 - 20, 4, 16 }, |
7050 | }, | 7106 | }, |
7051 | }; | 7107 | }; |
7052 | 7108 | ||
@@ -7228,8 +7284,8 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = { | |||
7228 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 7284 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
7229 | .rlen = 496, | 7285 | .rlen = 496, |
7230 | .also_non_np = 1, | 7286 | .also_non_np = 1, |
7231 | .np = 2, | 7287 | .np = 3, |
7232 | .tap = { 496 - 16, 16 }, | 7288 | .tap = { 496 - 20, 4, 16 }, |
7233 | }, | 7289 | }, |
7234 | }; | 7290 | }; |
7235 | 7291 | ||
@@ -8302,8 +8358,8 @@ static struct cipher_testvec tf_lrw_enc_tv_template[] = { | |||
8302 | "\x11\xd7\xb8\x6e\xea\xe1\x80\x30", | 8358 | "\x11\xd7\xb8\x6e\xea\xe1\x80\x30", |
8303 | .rlen = 512, | 8359 | .rlen = 512, |
8304 | .also_non_np = 1, | 8360 | .also_non_np = 1, |
8305 | .np = 2, | 8361 | .np = 3, |
8306 | .tap = { 512 - 16, 16 }, | 8362 | .tap = { 512 - 20, 4, 16 }, |
8307 | }, | 8363 | }, |
8308 | }; | 8364 | }; |
8309 | 8365 | ||
@@ -8555,8 +8611,8 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = { | |||
8555 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", | 8611 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", |
8556 | .rlen = 512, | 8612 | .rlen = 512, |
8557 | .also_non_np = 1, | 8613 | .also_non_np = 1, |
8558 | .np = 2, | 8614 | .np = 3, |
8559 | .tap = { 512 - 16, 16 }, | 8615 | .tap = { 512 - 20, 4, 16 }, |
8560 | }, | 8616 | }, |
8561 | }; | 8617 | }; |
8562 | 8618 | ||
@@ -8897,8 +8953,8 @@ static struct cipher_testvec tf_xts_enc_tv_template[] = { | |||
8897 | "\x37\x30\xe1\x91\x8d\xb3\x2a\xff", | 8953 | "\x37\x30\xe1\x91\x8d\xb3\x2a\xff", |
8898 | .rlen = 512, | 8954 | .rlen = 512, |
8899 | .also_non_np = 1, | 8955 | .also_non_np = 1, |
8900 | .np = 2, | 8956 | .np = 3, |
8901 | .tap = { 512 - 16, 16 }, | 8957 | .tap = { 512 - 20, 4, 16 }, |
8902 | }, | 8958 | }, |
8903 | }; | 8959 | }; |
8904 | 8960 | ||
@@ -9240,8 +9296,8 @@ static struct cipher_testvec tf_xts_dec_tv_template[] = { | |||
9240 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | 9296 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", |
9241 | .rlen = 512, | 9297 | .rlen = 512, |
9242 | .also_non_np = 1, | 9298 | .also_non_np = 1, |
9243 | .np = 2, | 9299 | .np = 3, |
9244 | .tap = { 512 - 16, 16 }, | 9300 | .tap = { 512 - 20, 4, 16 }, |
9245 | }, | 9301 | }, |
9246 | }; | 9302 | }; |
9247 | 9303 | ||
@@ -9438,8 +9494,8 @@ static struct cipher_testvec serpent_enc_tv_template[] = { | |||
9438 | "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7", | 9494 | "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7", |
9439 | .rlen = 496, | 9495 | .rlen = 496, |
9440 | .also_non_np = 1, | 9496 | .also_non_np = 1, |
9441 | .np = 2, | 9497 | .np = 3, |
9442 | .tap = { 496 - 16, 16 }, | 9498 | .tap = { 496 - 20, 4, 16 }, |
9443 | }, | 9499 | }, |
9444 | }; | 9500 | }; |
9445 | 9501 | ||
@@ -9664,8 +9720,8 @@ static struct cipher_testvec serpent_dec_tv_template[] = { | |||
9664 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 9720 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
9665 | .rlen = 496, | 9721 | .rlen = 496, |
9666 | .also_non_np = 1, | 9722 | .also_non_np = 1, |
9667 | .np = 2, | 9723 | .np = 3, |
9668 | .tap = { 496 - 16, 16 }, | 9724 | .tap = { 496 - 20, 4, 16 }, |
9669 | }, | 9725 | }, |
9670 | }; | 9726 | }; |
9671 | 9727 | ||
@@ -9846,8 +9902,8 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = { | |||
9846 | "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1", | 9902 | "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1", |
9847 | .rlen = 496, | 9903 | .rlen = 496, |
9848 | .also_non_np = 1, | 9904 | .also_non_np = 1, |
9849 | .np = 2, | 9905 | .np = 3, |
9850 | .tap = { 496 - 16, 16 }, | 9906 | .tap = { 496 - 20, 4, 16 }, |
9851 | }, | 9907 | }, |
9852 | }; | 9908 | }; |
9853 | 9909 | ||
@@ -9987,8 +10043,8 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = { | |||
9987 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 10043 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
9988 | .rlen = 496, | 10044 | .rlen = 496, |
9989 | .also_non_np = 1, | 10045 | .also_non_np = 1, |
9990 | .np = 2, | 10046 | .np = 3, |
9991 | .tap = { 496 - 16, 16 }, | 10047 | .tap = { 496 - 20, 4, 16 }, |
9992 | }, | 10048 | }, |
9993 | }; | 10049 | }; |
9994 | 10050 | ||
@@ -11061,8 +11117,8 @@ static struct cipher_testvec serpent_lrw_enc_tv_template[] = { | |||
11061 | "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7", | 11117 | "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7", |
11062 | .rlen = 512, | 11118 | .rlen = 512, |
11063 | .also_non_np = 1, | 11119 | .also_non_np = 1, |
11064 | .np = 2, | 11120 | .np = 3, |
11065 | .tap = { 512 - 16, 16 }, | 11121 | .tap = { 512 - 20, 4, 16 }, |
11066 | }, | 11122 | }, |
11067 | }; | 11123 | }; |
11068 | 11124 | ||
@@ -11314,8 +11370,8 @@ static struct cipher_testvec serpent_lrw_dec_tv_template[] = { | |||
11314 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", | 11370 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", |
11315 | .rlen = 512, | 11371 | .rlen = 512, |
11316 | .also_non_np = 1, | 11372 | .also_non_np = 1, |
11317 | .np = 2, | 11373 | .np = 3, |
11318 | .tap = { 512 - 16, 16 }, | 11374 | .tap = { 512 - 20, 4, 16 }, |
11319 | }, | 11375 | }, |
11320 | }; | 11376 | }; |
11321 | 11377 | ||
@@ -11656,8 +11712,8 @@ static struct cipher_testvec serpent_xts_enc_tv_template[] = { | |||
11656 | "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1", | 11712 | "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1", |
11657 | .rlen = 512, | 11713 | .rlen = 512, |
11658 | .also_non_np = 1, | 11714 | .also_non_np = 1, |
11659 | .np = 2, | 11715 | .np = 3, |
11660 | .tap = { 512 - 16, 16 }, | 11716 | .tap = { 512 - 20, 4, 16 }, |
11661 | }, | 11717 | }, |
11662 | }; | 11718 | }; |
11663 | 11719 | ||
@@ -11999,8 +12055,8 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = { | |||
11999 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | 12055 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", |
12000 | .rlen = 512, | 12056 | .rlen = 512, |
12001 | .also_non_np = 1, | 12057 | .also_non_np = 1, |
12002 | .np = 2, | 12058 | .np = 3, |
12003 | .tap = { 512 - 16, 16 }, | 12059 | .tap = { 512 - 20, 4, 16 }, |
12004 | }, | 12060 | }, |
12005 | }; | 12061 | }; |
12006 | 12062 | ||
@@ -12182,8 +12238,8 @@ static struct cipher_testvec cast6_enc_tv_template[] = { | |||
12182 | "\x11\x74\x93\x57\xB4\x7E\xC6\x00", | 12238 | "\x11\x74\x93\x57\xB4\x7E\xC6\x00", |
12183 | .rlen = 496, | 12239 | .rlen = 496, |
12184 | .also_non_np = 1, | 12240 | .also_non_np = 1, |
12185 | .np = 2, | 12241 | .np = 3, |
12186 | .tap = { 496 - 16, 16 }, | 12242 | .tap = { 496 - 20, 4, 16 }, |
12187 | }, | 12243 | }, |
12188 | }; | 12244 | }; |
12189 | 12245 | ||
@@ -12353,8 +12409,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = { | |||
12353 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 12409 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
12354 | .rlen = 496, | 12410 | .rlen = 496, |
12355 | .also_non_np = 1, | 12411 | .also_non_np = 1, |
12356 | .np = 2, | 12412 | .np = 3, |
12357 | .tap = { 496 - 16, 16 }, | 12413 | .tap = { 496 - 20, 4, 16 }, |
12358 | }, | 12414 | }, |
12359 | }; | 12415 | }; |
12360 | 12416 | ||
@@ -12494,8 +12550,8 @@ static struct cipher_testvec cast6_cbc_enc_tv_template[] = { | |||
12494 | "\x22\x46\x89\x2D\x0F\x2B\x08\x24", | 12550 | "\x22\x46\x89\x2D\x0F\x2B\x08\x24", |
12495 | .rlen = 496, | 12551 | .rlen = 496, |
12496 | .also_non_np = 1, | 12552 | .also_non_np = 1, |
12497 | .np = 2, | 12553 | .np = 3, |
12498 | .tap = { 496 - 16, 16 }, | 12554 | .tap = { 496 - 20, 4, 16 }, |
12499 | }, | 12555 | }, |
12500 | }; | 12556 | }; |
12501 | 12557 | ||
@@ -12635,8 +12691,8 @@ static struct cipher_testvec cast6_cbc_dec_tv_template[] = { | |||
12635 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 12691 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
12636 | .rlen = 496, | 12692 | .rlen = 496, |
12637 | .also_non_np = 1, | 12693 | .also_non_np = 1, |
12638 | .np = 2, | 12694 | .np = 3, |
12639 | .tap = { 496 - 16, 16 }, | 12695 | .tap = { 496 - 20, 4, 16 }, |
12640 | }, | 12696 | }, |
12641 | }; | 12697 | }; |
12642 | 12698 | ||
@@ -12792,8 +12848,8 @@ static struct cipher_testvec cast6_ctr_enc_tv_template[] = { | |||
12792 | "\xF9\xC5\xDD\x27\xB3\x39\xCB\xCB", | 12848 | "\xF9\xC5\xDD\x27\xB3\x39\xCB\xCB", |
12793 | .rlen = 496, | 12849 | .rlen = 496, |
12794 | .also_non_np = 1, | 12850 | .also_non_np = 1, |
12795 | .np = 2, | 12851 | .np = 3, |
12796 | .tap = { 496 - 16, 16 }, | 12852 | .tap = { 496 - 20, 4, 16 }, |
12797 | }, | 12853 | }, |
12798 | }; | 12854 | }; |
12799 | 12855 | ||
@@ -12949,8 +13005,8 @@ static struct cipher_testvec cast6_ctr_dec_tv_template[] = { | |||
12949 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 13005 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
12950 | .rlen = 496, | 13006 | .rlen = 496, |
12951 | .also_non_np = 1, | 13007 | .also_non_np = 1, |
12952 | .np = 2, | 13008 | .np = 3, |
12953 | .tap = { 496 - 16, 16 }, | 13009 | .tap = { 496 - 20, 4, 16 }, |
12954 | }, | 13010 | }, |
12955 | }; | 13011 | }; |
12956 | 13012 | ||
@@ -13096,8 +13152,8 @@ static struct cipher_testvec cast6_lrw_enc_tv_template[] = { | |||
13096 | "\xC4\xF5\x99\x61\xBC\xBB\x5B\x46", | 13152 | "\xC4\xF5\x99\x61\xBC\xBB\x5B\x46", |
13097 | .rlen = 512, | 13153 | .rlen = 512, |
13098 | .also_non_np = 1, | 13154 | .also_non_np = 1, |
13099 | .np = 2, | 13155 | .np = 3, |
13100 | .tap = { 512 - 16, 16 }, | 13156 | .tap = { 512 - 20, 4, 16 }, |
13101 | }, | 13157 | }, |
13102 | }; | 13158 | }; |
13103 | 13159 | ||
@@ -13243,8 +13299,8 @@ static struct cipher_testvec cast6_lrw_dec_tv_template[] = { | |||
13243 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", | 13299 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", |
13244 | .rlen = 512, | 13300 | .rlen = 512, |
13245 | .also_non_np = 1, | 13301 | .also_non_np = 1, |
13246 | .np = 2, | 13302 | .np = 3, |
13247 | .tap = { 512 - 16, 16 }, | 13303 | .tap = { 512 - 20, 4, 16 }, |
13248 | }, | 13304 | }, |
13249 | }; | 13305 | }; |
13250 | 13306 | ||
@@ -13392,8 +13448,8 @@ static struct cipher_testvec cast6_xts_enc_tv_template[] = { | |||
13392 | "\x22\x60\x4E\xE8\xA4\x5D\x85\xB9", | 13448 | "\x22\x60\x4E\xE8\xA4\x5D\x85\xB9", |
13393 | .rlen = 512, | 13449 | .rlen = 512, |
13394 | .also_non_np = 1, | 13450 | .also_non_np = 1, |
13395 | .np = 2, | 13451 | .np = 3, |
13396 | .tap = { 512 - 16, 16 }, | 13452 | .tap = { 512 - 20, 4, 16 }, |
13397 | }, | 13453 | }, |
13398 | }; | 13454 | }; |
13399 | 13455 | ||
@@ -13541,8 +13597,8 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = { | |||
13541 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | 13597 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", |
13542 | .rlen = 512, | 13598 | .rlen = 512, |
13543 | .also_non_np = 1, | 13599 | .also_non_np = 1, |
13544 | .np = 2, | 13600 | .np = 3, |
13545 | .tap = { 512 - 16, 16 }, | 13601 | .tap = { 512 - 20, 4, 16 }, |
13546 | }, | 13602 | }, |
13547 | }; | 13603 | }; |
13548 | 13604 | ||
@@ -13749,8 +13805,8 @@ static struct cipher_testvec aes_enc_tv_template[] = { | |||
13749 | "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9", | 13805 | "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9", |
13750 | .rlen = 496, | 13806 | .rlen = 496, |
13751 | .also_non_np = 1, | 13807 | .also_non_np = 1, |
13752 | .np = 2, | 13808 | .np = 3, |
13753 | .tap = { 496 - 16, 16 }, | 13809 | .tap = { 496 - 20, 4, 16 }, |
13754 | }, | 13810 | }, |
13755 | }; | 13811 | }; |
13756 | 13812 | ||
@@ -13921,8 +13977,8 @@ static struct cipher_testvec aes_dec_tv_template[] = { | |||
13921 | "\xED\x56\xBF\x28\xB4\x1D\x86\x12", | 13977 | "\xED\x56\xBF\x28\xB4\x1D\x86\x12", |
13922 | .rlen = 496, | 13978 | .rlen = 496, |
13923 | .also_non_np = 1, | 13979 | .also_non_np = 1, |
13924 | .np = 2, | 13980 | .np = 3, |
13925 | .tap = { 496 - 16, 16 }, | 13981 | .tap = { 496 - 20, 4, 16 }, |
13926 | }, | 13982 | }, |
13927 | }; | 13983 | }; |
13928 | 13984 | ||
@@ -14140,8 +14196,8 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = { | |||
14140 | "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02", | 14196 | "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02", |
14141 | .rlen = 496, | 14197 | .rlen = 496, |
14142 | .also_non_np = 1, | 14198 | .also_non_np = 1, |
14143 | .np = 2, | 14199 | .np = 3, |
14144 | .tap = { 496 - 16, 16 }, | 14200 | .tap = { 496 - 20, 4, 16 }, |
14145 | }, | 14201 | }, |
14146 | }; | 14202 | }; |
14147 | 14203 | ||
@@ -14359,8 +14415,8 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = { | |||
14359 | "\xED\x56\xBF\x28\xB4\x1D\x86\x12", | 14415 | "\xED\x56\xBF\x28\xB4\x1D\x86\x12", |
14360 | .rlen = 496, | 14416 | .rlen = 496, |
14361 | .also_non_np = 1, | 14417 | .also_non_np = 1, |
14362 | .np = 2, | 14418 | .np = 3, |
14363 | .tap = { 496 - 16, 16 }, | 14419 | .tap = { 496 - 20, 4, 16 }, |
14364 | }, | 14420 | }, |
14365 | }; | 14421 | }; |
14366 | 14422 | ||
@@ -16265,8 +16321,8 @@ static struct cipher_testvec aes_lrw_enc_tv_template[] = { | |||
16265 | "\x74\x3f\x7d\x58\x88\x75\xde\x3e", | 16321 | "\x74\x3f\x7d\x58\x88\x75\xde\x3e", |
16266 | .rlen = 512, | 16322 | .rlen = 512, |
16267 | .also_non_np = 1, | 16323 | .also_non_np = 1, |
16268 | .np = 2, | 16324 | .np = 3, |
16269 | .tap = { 512 - 16, 16 }, | 16325 | .tap = { 512 - 20, 4, 16 }, |
16270 | } | 16326 | } |
16271 | }; | 16327 | }; |
16272 | 16328 | ||
@@ -16519,8 +16575,8 @@ static struct cipher_testvec aes_lrw_dec_tv_template[] = { | |||
16519 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", | 16575 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", |
16520 | .rlen = 512, | 16576 | .rlen = 512, |
16521 | .also_non_np = 1, | 16577 | .also_non_np = 1, |
16522 | .np = 2, | 16578 | .np = 3, |
16523 | .tap = { 512 - 16, 16 }, | 16579 | .tap = { 512 - 20, 4, 16 }, |
16524 | } | 16580 | } |
16525 | }; | 16581 | }; |
16526 | 16582 | ||
@@ -16861,8 +16917,8 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = { | |||
16861 | "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51", | 16917 | "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51", |
16862 | .rlen = 512, | 16918 | .rlen = 512, |
16863 | .also_non_np = 1, | 16919 | .also_non_np = 1, |
16864 | .np = 2, | 16920 | .np = 3, |
16865 | .tap = { 512 - 16, 16 }, | 16921 | .tap = { 512 - 20, 4, 16 }, |
16866 | } | 16922 | } |
16867 | }; | 16923 | }; |
16868 | 16924 | ||
@@ -17203,8 +17259,8 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = { | |||
17203 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | 17259 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", |
17204 | .rlen = 512, | 17260 | .rlen = 512, |
17205 | .also_non_np = 1, | 17261 | .also_non_np = 1, |
17206 | .np = 2, | 17262 | .np = 3, |
17207 | .tap = { 512 - 16, 16 }, | 17263 | .tap = { 512 - 20, 4, 16 }, |
17208 | } | 17264 | } |
17209 | }; | 17265 | }; |
17210 | 17266 | ||
@@ -17420,8 +17476,8 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = { | |||
17420 | "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51", | 17476 | "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51", |
17421 | .rlen = 496, | 17477 | .rlen = 496, |
17422 | .also_non_np = 1, | 17478 | .also_non_np = 1, |
17423 | .np = 2, | 17479 | .np = 3, |
17424 | .tap = { 496 - 16, 16 }, | 17480 | .tap = { 496 - 20, 4, 16 }, |
17425 | }, { /* Generated with Crypto++ */ | 17481 | }, { /* Generated with Crypto++ */ |
17426 | .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55" | 17482 | .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55" |
17427 | "\x0F\x32\x55\x78\x9B\xBE\x78\x9B" | 17483 | "\x0F\x32\x55\x78\x9B\xBE\x78\x9B" |
@@ -17775,8 +17831,8 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = { | |||
17775 | "\xED\x56\xBF\x28\xB4\x1D\x86\x12", | 17831 | "\xED\x56\xBF\x28\xB4\x1D\x86\x12", |
17776 | .rlen = 496, | 17832 | .rlen = 496, |
17777 | .also_non_np = 1, | 17833 | .also_non_np = 1, |
17778 | .np = 2, | 17834 | .np = 3, |
17779 | .tap = { 496 - 16, 16 }, | 17835 | .tap = { 496 - 20, 4, 16 }, |
17780 | }, { /* Generated with Crypto++ */ | 17836 | }, { /* Generated with Crypto++ */ |
17781 | .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55" | 17837 | .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55" |
17782 | "\x0F\x32\x55\x78\x9B\xBE\x78\x9B" | 17838 | "\x0F\x32\x55\x78\x9B\xBE\x78\x9B" |
@@ -20743,6 +20799,834 @@ static struct cprng_testvec ansi_cprng_aes_tv_template[] = { | |||
20743 | }, | 20799 | }, |
20744 | }; | 20800 | }; |
20745 | 20801 | ||
20802 | /* | ||
20803 | * SP800-90A DRBG Test vectors from | ||
20804 | * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip | ||
20805 | * | ||
20806 | * Test vectors for DRBG with prediction resistance. All types of DRBGs | ||
20807 | * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and | ||
20808 | * w/o personalization string, w/ and w/o additional input string). | ||
20809 | */ | ||
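/*
 * Field names in the NIST response files map onto struct drbg_testvec
 * roughly as follows (illustrative note, not part of the test data):
 *   EntropyInput || Nonce                 -> .entropy (which is why the
 *                                            .entropylen values are 48 for
 *                                            SHA-256 and 24 for CTR-AES128)
 *   PersonalizationString                 -> .pers
 *   EntropyInputPR (1st / 2nd generate)   -> .entpra / .entprb
 *   AdditionalInput (1st / 2nd generate)  -> .addtla / .addtlb
 *   ReturnedBits                          -> .expected
 */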
20810 | static struct drbg_testvec drbg_pr_sha256_tv_template[] = { | ||
20811 | { | ||
20812 | .entropy = (unsigned char *) | ||
20813 | "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86" | ||
20814 | "\xc1\xeb\xd2\x4e\x36\x14\xab\x18\xc4\x9c\xc9\xcf" | ||
20815 | "\x1a\xe8\xf7\x7b\x02\x49\x73\xd7\xf1\x42\x7d\xc6" | ||
20816 | "\x3f\x29\x2d\xec\xd3\x66\x51\x3f\x1d\x8d\x5b\x4e", | ||
20817 | .entropylen = 48, | ||
20818 | .entpra = (unsigned char *) | ||
20819 | "\x38\x9c\x91\xfa\xc2\xa3\x46\x89\x56\x08\x3f\x62" | ||
20820 | "\x73\xd5\x22\xa9\x29\x63\x3a\x1d\xe5\x5d\x5e\x4f" | ||
20821 | "\x67\xb0\x67\x7a\x5e\x9e\x0c\x62", | ||
20822 | .entprb = (unsigned char *) | ||
20823 | "\xb2\x8f\x36\xb2\xf6\x8d\x39\x13\xfa\x6c\x66\xcf" | ||
20824 | "\x62\x8a\x7e\x8c\x12\x33\x71\x9c\x69\xe4\xa5\xf0" | ||
20825 | "\x8c\xee\xeb\x9c\xf5\x31\x98\x31", | ||
20826 | .entprlen = 32, | ||
20827 | .expected = (unsigned char *) | ||
20828 | "\x52\x7b\xa3\xad\x71\x77\xa4\x49\x42\x04\x61\xc7" | ||
20829 | "\xf0\xaf\xa5\xfd\xd3\xb3\x0d\x6a\x61\xba\x35\x49" | ||
20830 | "\xbb\xaa\xaf\xe4\x25\x7d\xb5\x48\xaf\x5c\x18\x3d" | ||
20831 | "\x33\x8d\x9d\x45\xdf\x98\xd5\x94\xa8\xda\x92\xfe" | ||
20832 | "\xc4\x3c\x94\x2a\xcf\x7f\x7b\xf2\xeb\x28\xa9\xf1" | ||
20833 | "\xe0\x86\x30\xa8\xfe\xf2\x48\x90\x91\x0c\x75\xb5" | ||
20834 | "\x3c\x00\xf0\x4d\x09\x4f\x40\xa7\xa2\x8c\x52\xdf" | ||
20835 | "\x52\xef\x17\xbf\x3d\xd1\xa2\x31\xb4\xb8\xdc\xe6" | ||
20836 | "\x5b\x0d\x1f\x78\x36\xb4\xe6\x4b\xa7\x11\x25\xd5" | ||
20837 | "\x94\xc6\x97\x36\xab\xf0\xe5\x31\x28\x6a\xbb\xce" | ||
20838 | "\x30\x81\xa6\x8f\x27\x14\xf8\x1c", | ||
20839 | .expectedlen = 128, | ||
20840 | .addtla = NULL, | ||
20841 | .addtlb = NULL, | ||
20842 | .addtllen = 0, | ||
20843 | .pers = NULL, | ||
20844 | .perslen = 0, | ||
20845 | }, { | ||
20846 | .entropy = (unsigned char *) | ||
20847 | "\x5d\xf2\x14\xbc\xf6\xb5\x4e\x0b\xf0\x0d\x6f\x2d" | ||
20848 | "\xe2\x01\x66\x7b\xd0\xa4\x73\xa4\x21\xdd\xb0\xc0" | ||
20849 | "\x51\x79\x09\xf4\xea\xa9\x08\xfa\xa6\x67\xe0\xe1" | ||
20850 | "\xd1\x88\xa8\xad\xee\x69\x74\xb3\x55\x06\x9b\xf6", | ||
20851 | .entropylen = 48, | ||
20852 | .entpra = (unsigned char *) | ||
20853 | "\xef\x48\x06\xa2\xc2\x45\xf1\x44\xfa\x34\x2c\xeb" | ||
20854 | "\x8d\x78\x3c\x09\x8f\x34\x72\x20\xf2\xe7\xfd\x13" | ||
20855 | "\x76\x0a\xf6\xdc\x3c\xf5\xc0\x15", | ||
20856 | .entprb = (unsigned char *) | ||
20857 | "\x4b\xbe\xe5\x24\xed\x6a\x2d\x0c\xdb\x73\x5e\x09" | ||
20858 | "\xf9\xad\x67\x7c\x51\x47\x8b\x6b\x30\x2a\xc6\xde" | ||
20859 | "\x76\xaa\x55\x04\x8b\x0a\x72\x95", | ||
20860 | .entprlen = 32, | ||
20861 | .expected = (unsigned char *) | ||
20862 | "\x3b\x14\x71\x99\xa1\xda\xa0\x42\xe6\xc8\x85\x32" | ||
20863 | "\x70\x20\x32\x53\x9a\xbe\xd1\x1e\x15\xef\xfb\x4c" | ||
20864 | "\x25\x6e\x19\x3a\xf0\xb9\xcb\xde\xf0\x3b\xc6\x18" | ||
20865 | "\x4d\x85\x5a\x9b\xf1\xe3\xc2\x23\x03\x93\x08\xdb" | ||
20866 | "\xa7\x07\x4b\x33\x78\x40\x4d\xeb\x24\xf5\x6e\x81" | ||
20867 | "\x4a\x1b\x6e\xa3\x94\x52\x43\xb0\xaf\x2e\x21\xf4" | ||
20868 | "\x42\x46\x8e\x90\xed\x34\x21\x75\xea\xda\x67\xb6" | ||
20869 | "\xe4\xf6\xff\xc6\x31\x6c\x9a\x5a\xdb\xb3\x97\x13" | ||
20870 | "\x09\xd3\x20\x98\x33\x2d\x6d\xd7\xb5\x6a\xa8\xa9" | ||
20871 | "\x9a\x5b\xd6\x87\x52\xa1\x89\x2b\x4b\x9c\x64\x60" | ||
20872 | "\x50\x47\xa3\x63\x81\x16\xaf\x19", | ||
20873 | .expectedlen = 128, | ||
20874 | .addtla = (unsigned char *) | ||
20875 | "\xbe\x13\xdb\x2a\xe9\xa8\xfe\x09\x97\xe1\xce\x5d" | ||
20876 | "\xe8\xbb\xc0\x7c\x4f\xcb\x62\x19\x3f\x0f\xd2\xad" | ||
20877 | "\xa9\xd0\x1d\x59\x02\xc4\xff\x70", | ||
20878 | .addtlb = (unsigned char *) | ||
20879 | "\x6f\x96\x13\xe2\xa7\xf5\x6c\xfe\xdf\x66\xe3\x31" | ||
20880 | "\x63\x76\xbf\x20\x27\x06\x49\xf1\xf3\x01\x77\x41" | ||
20881 | "\x9f\xeb\xe4\x38\xfe\x67\x00\xcd", | ||
20882 | .addtllen = 32, | ||
20883 | .pers = NULL, | ||
20884 | .perslen = 0, | ||
20885 | }, { | ||
20886 | .entropy = (unsigned char *) | ||
20887 | "\xc6\x1c\xaf\x83\xa2\x56\x38\xf9\xb0\xbc\xd9\x85" | ||
20888 | "\xf5\x2e\xc4\x46\x9c\xe1\xb9\x40\x98\x70\x10\x72" | ||
20889 | "\xd7\x7d\x15\x85\xa1\x83\x5a\x97\xdf\xc8\xa8\xe8" | ||
20890 | "\x03\x4c\xcb\x70\x35\x8b\x90\x94\x46\x8a\x6e\xa1", | ||
20891 | .entropylen = 48, | ||
20892 | .entpra = (unsigned char *) | ||
20893 | "\xc9\x05\xa4\xcf\x28\x80\x4b\x93\x0f\x8b\xc6\xf9" | ||
20894 | "\x09\x41\x58\x74\xe9\xec\x28\xc7\x53\x0a\x73\x60" | ||
20895 | "\xba\x0a\xde\x57\x5b\x4b\x9f\x29", | ||
20896 | .entprb = (unsigned char *) | ||
20897 | "\x4f\x31\xd2\xeb\xac\xfa\xa8\xe2\x01\x7d\xf3\xbd" | ||
20898 | "\x42\xbd\x20\xa0\x30\x65\x74\xd5\x5d\xd2\xad\xa4" | ||
20899 | "\xa9\xeb\x1f\x4d\xf6\xfd\xb8\x26", | ||
20900 | .entprlen = 32, | ||
20901 | .expected = (unsigned char *) | ||
20902 | "\xf6\x13\x05\xcb\x83\x60\x16\x42\x49\x1d\xc6\x25" | ||
20903 | "\x3b\x8c\x31\xa3\xbe\x8b\xbd\x1c\xe2\xec\x1d\xde" | ||
20904 | "\xbb\xbf\xa1\xac\xa8\x9f\x50\xce\x69\xce\xef\xd5" | ||
20905 | "\xd6\xf2\xef\x6a\xf7\x81\x38\xdf\xbc\xa7\x5a\xb9" | ||
20906 | "\xb2\x42\x65\xab\xe4\x86\x8d\x2d\x9d\x59\x99\x2c" | ||
20907 | "\x5a\x0d\x71\x55\x98\xa4\x45\xc2\x8d\xdb\x05\x5e" | ||
20908 | "\x50\x21\xf7\xcd\xe8\x98\x43\xce\x57\x74\x63\x4c" | ||
20909 | "\xf3\xb1\xa5\x14\x1e\x9e\x01\xeb\x54\xd9\x56\xae" | ||
20910 | "\xbd\xb6\x6f\x1a\x47\x6b\x3b\x44\xe4\xa2\xe9\x3c" | ||
20911 | "\x6c\x83\x12\x30\xb8\x78\x7f\x8e\x54\x82\xd4\xfe" | ||
20912 | "\x90\x35\x0d\x4c\x4d\x85\xe7\x13", | ||
20913 | .expectedlen = 128, | ||
20914 | .addtla = NULL, | ||
20915 | .addtlb = NULL, | ||
20916 | .addtllen = 0, | ||
20917 | .pers = (unsigned char *) | ||
20918 | "\xa5\xbf\xac\x4f\x71\xa1\xbb\x67\x94\xc6\x50\xc7" | ||
20919 | "\x2a\x45\x9e\x10\xa8\xed\xf7\x52\x4f\xfe\x21\x90" | ||
20920 | "\xa4\x1b\xe1\xe2\x53\xcc\x61\x47", | ||
20921 | .perslen = 32, | ||
20922 | }, { | ||
20923 | .entropy = (unsigned char *) | ||
20924 | "\xb6\xc1\x8d\xdf\x99\x54\xbe\x95\x10\x48\xd9\xf6" | ||
20925 | "\xd7\x48\xa8\x73\x2d\x74\xde\x1e\xde\x57\x7e\xf4" | ||
20926 | "\x7b\x7b\x64\xef\x88\x7a\xa8\x10\x4b\xe1\xc1\x87" | ||
20927 | "\xbb\x0b\xe1\x39\x39\x50\xaf\x68\x9c\xa2\xbf\x5e", | ||
20928 | .entropylen = 48, | ||
20929 | .entpra = (unsigned char *) | ||
20930 | "\xdc\x81\x0a\x01\x58\xa7\x2e\xce\xee\x48\x8c\x7c" | ||
20931 | "\x77\x9e\x3c\xf1\x17\x24\x7a\xbb\xab\x9f\xca\x12" | ||
20932 | "\x19\xaf\x97\x2d\x5f\xf9\xff\xfc", | ||
20933 | .entprb = (unsigned char *) | ||
20934 | "\xaf\xfc\x4f\x98\x8b\x93\x95\xc1\xb5\x8b\x7f\x73" | ||
20935 | "\x6d\xa6\xbe\x6d\x33\xeb\x2c\x82\xb1\xaf\xc1\xb6" | ||
20936 | "\xb6\x05\xe2\x44\xaa\xfd\xe7\xdb", | ||
20937 | .entprlen = 32, | ||
20938 | .expected = (unsigned char *) | ||
20939 | "\x51\x79\xde\x1c\x0f\x58\xf3\xf4\xc9\x57\x2e\x31" | ||
20940 | "\xa7\x09\xa1\x53\x64\x63\xa2\xc5\x1d\x84\x88\x65" | ||
20941 | "\x01\x1b\xc6\x16\x3c\x49\x5b\x42\x8e\x53\xf5\x18" | ||
20942 | "\xad\x94\x12\x0d\x4f\x55\xcc\x45\x5c\x98\x0f\x42" | ||
20943 | "\x28\x2f\x47\x11\xf9\xc4\x01\x97\x6b\xa0\x94\x50" | ||
20944 | "\xa9\xd1\x5e\x06\x54\x3f\xdf\xbb\xc4\x98\xee\x8b" | ||
20945 | "\xba\xa9\xfa\x49\xee\x1d\xdc\xfb\x50\xf6\x51\x9f" | ||
20946 | "\x6c\x4a\x9a\x6f\x63\xa2\x7d\xad\xaf\x3a\x24\xa0" | ||
20947 | "\xd9\x9f\x07\xeb\x15\xee\x26\xe0\xd5\x63\x39\xda" | ||
20948 | "\x3c\x59\xd6\x33\x6c\x02\xe8\x05\x71\x46\x68\x44" | ||
20949 | "\x63\x4a\x68\x72\xe9\xf5\x55\xfe", | ||
20950 | .expectedlen = 128, | ||
20951 | .addtla = (unsigned char *) | ||
20952 | "\x15\x20\x2f\xf6\x98\x28\x63\xa2\xc4\x4e\xbb\x6c" | ||
20953 | "\xb2\x25\x92\x61\x79\xc9\x22\xc4\x61\x54\x96\xff" | ||
20954 | "\x4a\x85\xca\x80\xfe\x0d\x1c\xd0", | ||
20955 | .addtlb = (unsigned char *) | ||
20956 | "\xde\x29\x8e\x03\x42\x61\xa3\x28\x5e\xc8\x80\xc2" | ||
20957 | "\x6d\xbf\xad\x13\xe1\x8d\x2a\xc7\xe8\xc7\x18\x89" | ||
20958 | "\x42\x58\x9e\xd6\xcc\xad\x7b\x1e", | ||
20959 | .addtllen = 32, | ||
20960 | .pers = (unsigned char *) | ||
20961 | "\x84\xc3\x73\x9e\xce\xb3\xbc\x89\xf7\x62\xb3\xe1" | ||
20962 | "\xd7\x48\x45\x8a\xa9\xcc\xe9\xed\xd5\x81\x84\x52" | ||
20963 | "\x82\x4c\xdc\x19\xb8\xf8\x92\x5c", | ||
20964 | .perslen = 32, | ||
20965 | }, | ||
20966 | }; | ||
20967 | |||
20968 | static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = { | ||
20969 | { | ||
20970 | .entropy = (unsigned char *) | ||
20971 | "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a" | ||
20972 | "\x7e\x5c\x0e\xae\x0d\x3e\x30\x95\x59\xe9\xfe\x96" | ||
20973 | "\xb0\x67\x6d\x49\xd5\x91\xea\x4d\x07\xd2\x0d\x46" | ||
20974 | "\xd0\x64\x75\x7d\x30\x23\xca\xc2\x37\x61\x27\xab", | ||
20975 | .entropylen = 48, | ||
20976 | .entpra = (unsigned char *) | ||
20977 | "\xc6\x0f\x29\x99\x10\x0f\x73\x8c\x10\xf7\x47\x92" | ||
20978 | "\x67\x6a\x3f\xc4\xa2\x62\xd1\x37\x21\x79\x80\x46" | ||
20979 | "\xe2\x9a\x29\x51\x81\x56\x9f\x54", | ||
20980 | .entprb = (unsigned char *) | ||
20981 | "\xc1\x1d\x45\x24\xc9\x07\x1b\xd3\x09\x60\x15\xfc" | ||
20982 | "\xf7\xbc\x24\xa6\x07\xf2\x2f\xa0\x65\xc9\x37\x65" | ||
20983 | "\x8a\x2a\x77\xa8\x69\x90\x89\xf4", | ||
20984 | .entprlen = 32, | ||
20985 | .expected = (unsigned char *) | ||
20986 | "\xab\xc0\x15\x85\x60\x94\x80\x3a\x93\x8d\xff\xd2" | ||
20987 | "\x0d\xa9\x48\x43\x87\x0e\xf9\x35\xb8\x2c\xfe\xc1" | ||
20988 | "\x77\x06\xb8\xf5\x51\xb8\x38\x50\x44\x23\x5d\xd4" | ||
20989 | "\x4b\x59\x9f\x94\xb3\x9b\xe7\x8d\xd4\x76\xe0\xcf" | ||
20990 | "\x11\x30\x9c\x99\x5a\x73\x34\xe0\xa7\x8b\x37\xbc" | ||
20991 | "\x95\x86\x23\x50\x86\xfa\x3b\x63\x7b\xa9\x1c\xf8" | ||
20992 | "\xfb\x65\xef\xa2\x2a\x58\x9c\x13\x75\x31\xaa\x7b" | ||
20993 | "\x2d\x4e\x26\x07\xaa\xc2\x72\x92\xb0\x1c\x69\x8e" | ||
20994 | "\x6e\x01\xae\x67\x9e\xb8\x7c\x01\xa8\x9c\x74\x22" | ||
20995 | "\xd4\x37\x2d\x6d\x75\x4a\xba\xbb\x4b\xf8\x96\xfc" | ||
20996 | "\xb1\xcd\x09\xd6\x92\xd0\x28\x3f", | ||
20997 | .expectedlen = 128, | ||
20998 | .addtla = NULL, | ||
20999 | .addtlb = NULL, | ||
21000 | .addtllen = 0, | ||
21001 | .pers = NULL, | ||
21002 | .perslen = 0, | ||
21003 | }, { | ||
21004 | .entropy = (unsigned char *) | ||
21005 | "\xb9\x1f\xe9\xef\xdd\x9b\x7d\x20\xb6\xec\xe0\x2f" | ||
21006 | "\xdb\x76\x24\xce\x41\xc8\x3a\x4a\x12\x7f\x3e\x2f" | ||
21007 | "\xae\x05\x99\xea\xb5\x06\x71\x0d\x0c\x4c\xb4\x05" | ||
21008 | "\x26\xc6\xbd\xf5\x7f\x2a\x3d\xf2\xb5\x49\x7b\xda", | ||
21009 | .entropylen = 48, | ||
21010 | .entpra = (unsigned char *) | ||
21011 | "\xef\x67\x50\x9c\xa7\x7d\xdf\xb7\x2d\x81\x01\xa4" | ||
21012 | "\x62\x81\x6a\x69\x5b\xb3\x37\x45\xa7\x34\x8e\x26" | ||
21013 | "\x46\xd9\x26\xa2\x19\xd4\x94\x43", | ||
21014 | .entprb = (unsigned char *) | ||
21015 | "\x97\x75\x53\x53\xba\xb4\xa6\xb2\x91\x60\x71\x79" | ||
21016 | "\xd1\x6b\x4a\x24\x9a\x34\x66\xcc\x33\xab\x07\x98" | ||
21017 | "\x51\x78\x72\xb2\x79\xfd\x2c\xff", | ||
21018 | .entprlen = 32, | ||
21019 | .expected = (unsigned char *) | ||
21020 | "\x9c\xdc\x63\x8a\x19\x23\x22\x66\x0c\xc5\xb9\xd7" | ||
21021 | "\xfb\x2a\xb0\x31\xe3\x8a\x36\xa8\x5a\xa8\x14\xda" | ||
21022 | "\x1e\xa9\xcc\xfe\xb8\x26\x44\x83\x9f\xf6\xff\xaa" | ||
21023 | "\xc8\x98\xb8\x30\x35\x3b\x3d\x36\xd2\x49\xd4\x40" | ||
21024 | "\x62\x0a\x65\x10\x76\x55\xef\xc0\x95\x9c\xa7\xda" | ||
21025 | "\x3f\xcf\xb7\x7b\xc6\xe1\x28\x52\xfc\x0c\xe2\x37" | ||
21026 | "\x0d\x83\xa7\x51\x4b\x31\x47\x3c\xe1\x3c\xae\x70" | ||
21027 | "\x01\xc8\xa3\xd3\xc2\xac\x77\x9c\xd1\x68\x77\x9b" | ||
21028 | "\x58\x27\x3b\xa5\x0f\xc2\x7a\x8b\x04\x65\x62\xd5" | ||
21029 | "\xe8\xd6\xfe\x2a\xaf\xd3\xd3\xfe\xbd\x18\xfb\xcd" | ||
21030 | "\xcd\x66\xb5\x01\x69\x66\xa0\x3c", | ||
21031 | .expectedlen = 128, | ||
21032 | .addtla = (unsigned char *) | ||
21033 | "\x17\xc1\x56\xcb\xcc\x50\xd6\x03\x7d\x45\x76\xa3" | ||
21034 | "\x75\x76\xc1\x4a\x66\x1b\x2e\xdf\xb0\x2e\x7d\x56" | ||
21035 | "\x6d\x99\x3b\xc6\x58\xda\x03\xf6", | ||
21036 | .addtlb = (unsigned char *) | ||
21037 | "\x7c\x7b\x4a\x4b\x32\x5e\x6f\x67\x34\xf5\x21\x4c" | ||
21038 | "\xf9\x96\xf9\xbf\x1c\x8c\x81\xd3\x9b\x60\x6a\x44" | ||
21039 | "\xc6\x03\xa2\xfb\x13\x20\x19\xb7", | ||
21040 | .addtllen = 32, | ||
21041 | .pers = NULL, | ||
21042 | .perslen = 0, | ||
21043 | }, { | ||
21044 | .entropy = (unsigned char *) | ||
21045 | "\x13\x54\x96\xfc\x1b\x7d\x28\xf3\x18\xc9\xa7\x89" | ||
21046 | "\xb6\xb3\xc8\x72\xac\x00\xd4\x59\x36\x25\x05\xaf" | ||
21047 | "\xa5\xdb\x96\xcb\x3c\x58\x46\x87\xa5\xaa\xbf\x20" | ||
21048 | "\x3b\xfe\x23\x0e\xd1\xc7\x41\x0f\x3f\xc9\xb3\x67", | ||
21049 | .entropylen = 48, | ||
21050 | .entpra = (unsigned char *) | ||
21051 | "\xe2\xbd\xb7\x48\x08\x06\xf3\xe1\x93\x3c\xac\x79" | ||
21052 | "\xa7\x2b\x11\xda\xe3\x2e\xe1\x91\xa5\x02\x19\x57" | ||
21053 | "\x20\x28\xad\xf2\x60\xd7\xcd\x45", | ||
21054 | .entprb = (unsigned char *) | ||
21055 | "\x8b\xd4\x69\xfc\xff\x59\x95\x95\xc6\x51\xde\x71" | ||
21056 | "\x68\x5f\xfc\xf9\x4a\xab\xec\x5a\xcb\xbe\xd3\x66" | ||
21057 | "\x1f\xfa\x74\xd3\xac\xa6\x74\x60", | ||
21058 | .entprlen = 32, | ||
21059 | .expected = (unsigned char *) | ||
21060 | "\x1f\x9e\xaf\xe4\xd2\x46\xb7\x47\x41\x4c\x65\x99" | ||
21061 | "\x01\xe9\x3b\xbb\x83\x0c\x0a\xb0\xc1\x3a\xe2\xb3" | ||
21062 | "\x31\x4e\xeb\x93\x73\xee\x0b\x26\xc2\x63\xa5\x75" | ||
21063 | "\x45\x99\xd4\x5c\x9f\xa1\xd4\x45\x87\x6b\x20\x61" | ||
21064 | "\x40\xea\x78\xa5\x32\xdf\x9e\x66\x17\xaf\xb1\x88" | ||
21065 | "\x9e\x2e\x23\xdd\xc1\xda\x13\x97\x88\xa5\xb6\x5e" | ||
21066 | "\x90\x14\x4e\xef\x13\xab\x5c\xd9\x2c\x97\x9e\x7c" | ||
21067 | "\xd7\xf8\xce\xea\x81\xf5\xcd\x71\x15\x49\x44\xce" | ||
21068 | "\x83\xb6\x05\xfb\x7d\x30\xb5\x57\x2c\x31\x4f\xfc" | ||
21069 | "\xfe\x80\xb6\xc0\x13\x0c\x5b\x9b\x2e\x8f\x3d\xfc" | ||
21070 | "\xc2\xa3\x0c\x11\x1b\x80\x5f\xf3", | ||
21071 | .expectedlen = 128, | ||
21072 | .addtla = NULL, | ||
21073 | .addtlb = NULL, | ||
21074 | .addtllen = 0, | ||
21075 | .pers = (unsigned char *) | ||
21076 | "\x64\xb6\xfc\x60\xbc\x61\x76\x23\x6d\x3f\x4a\x0f" | ||
21077 | "\xe1\xb4\xd5\x20\x9e\x70\xdd\x03\x53\x6d\xbf\xce" | ||
21078 | "\xcd\x56\x80\xbc\xb8\x15\xc8\xaa", | ||
21079 | .perslen = 32, | ||
21080 | }, { | ||
21081 | .entropy = (unsigned char *) | ||
21082 | "\xc7\xcc\xbc\x67\x7e\x21\x66\x1e\x27\x2b\x63\xdd" | ||
21083 | "\x3a\x78\xdc\xdf\x66\x6d\x3f\x24\xae\xcf\x37\x01" | ||
21084 | "\xa9\x0d\x89\x8a\xa7\xdc\x81\x58\xae\xb2\x10\x15" | ||
21085 | "\x7e\x18\x44\x6d\x13\xea\xdf\x37\x85\xfe\x81\xfb", | ||
21086 | .entropylen = 48, | ||
21087 | .entpra = (unsigned char *) | ||
21088 | "\x7b\xa1\x91\x5b\x3c\x04\xc4\x1b\x1d\x19\x2f\x1a" | ||
21089 | "\x18\x81\x60\x3c\x6c\x62\x91\xb7\xe9\xf5\xcb\x96" | ||
21090 | "\xbb\x81\x6a\xcc\xb5\xae\x55\xb6", | ||
21091 | .entprb = (unsigned char *) | ||
21092 | "\x99\x2c\xc7\x78\x7e\x3b\x88\x12\xef\xbe\xd3\xd2" | ||
21093 | "\x7d\x2a\xa5\x86\xda\x8d\x58\x73\x4a\x0a\xb2\x2e" | ||
21094 | "\xbb\x4c\x7e\xe3\x9a\xb6\x81\xc1", | ||
21095 | .entprlen = 32, | ||
21096 | .expected = (unsigned char *) | ||
21097 | "\x95\x6f\x95\xfc\x3b\xb7\xfe\x3e\xd0\x4e\x1a\x14" | ||
21098 | "\x6c\x34\x7f\x7b\x1d\x0d\x63\x5e\x48\x9c\x69\xe6" | ||
21099 | "\x46\x07\xd2\x87\xf3\x86\x52\x3d\x98\x27\x5e\xd7" | ||
21100 | "\x54\xe7\x75\x50\x4f\xfb\x4d\xfd\xac\x2f\x4b\x77" | ||
21101 | "\xcf\x9e\x8e\xcc\x16\xa2\x24\xcd\x53\xde\x3e\xc5" | ||
21102 | "\x55\x5d\xd5\x26\x3f\x89\xdf\xca\x8b\x4e\x1e\xb6" | ||
21103 | "\x88\x78\x63\x5c\xa2\x63\x98\x4e\x6f\x25\x59\xb1" | ||
21104 | "\x5f\x2b\x23\xb0\x4b\xa5\x18\x5d\xc2\x15\x74\x40" | ||
21105 | "\x59\x4c\xb4\x1e\xcf\x9a\x36\xfd\x43\xe2\x03\xb8" | ||
21106 | "\x59\x91\x30\x89\x2a\xc8\x5a\x43\x23\x7c\x73\x72" | ||
21107 | "\xda\x3f\xad\x2b\xba\x00\x6b\xd1", | ||
21108 | .expectedlen = 128, | ||
21109 | .addtla = (unsigned char *) | ||
21110 | "\x18\xe8\x17\xff\xef\x39\xc7\x41\x5c\x73\x03\x03" | ||
21111 | "\xf6\x3d\xe8\x5f\xc8\xab\xe4\xab\x0f\xad\xe8\xd6" | ||
21112 | "\x86\x88\x55\x28\xc1\x69\xdd\x76", | ||
21113 | .addtlb = (unsigned char *) | ||
21114 | "\xac\x07\xfc\xbe\x87\x0e\xd3\xea\x1f\x7e\xb8\xe7" | ||
21115 | "\x9d\xec\xe8\xe7\xbc\xf3\x18\x25\x77\x35\x4a\xaa" | ||
21116 | "\x00\x99\x2a\xdd\x0a\x00\x50\x82", | ||
21117 | .addtllen = 32, | ||
21118 | .pers = (unsigned char *) | ||
21119 | "\xbc\x55\xab\x3c\xf6\x52\xb0\x11\x3d\x7b\x90\xb8" | ||
21120 | "\x24\xc9\x26\x4e\x5a\x1e\x77\x0d\x3d\x58\x4a\xda" | ||
21121 | "\xd1\x81\xe9\xf8\xeb\x30\x8f\x6f", | ||
21122 | .perslen = 32, | ||
21123 | }, | ||
21124 | }; | ||
21125 | |||
21126 | static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = { | ||
21127 | { | ||
21128 | .entropy = (unsigned char *) | ||
21129 | "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42" | ||
21130 | "\x94\xd7\x28\x9c\x43\x77\x19\x29\x1a\x6d\xc3\xa2", | ||
21131 | .entropylen = 24, | ||
21132 | .entpra = (unsigned char *) | ||
21133 | "\x96\xd8\x9e\x45\x32\xc9\xd2\x08\x7a\x6d\x97\x15" | ||
21134 | "\xb4\xec\x80\xb1", | ||
21135 | .entprb = (unsigned char *) | ||
21136 | "\x8b\xb6\x72\xb5\x24\x0b\x98\x65\x95\x95\xe9\xc9" | ||
21137 | "\x28\x07\xeb\xc2", | ||
21138 | .entprlen = 16, | ||
21139 | .expected = (unsigned char *) | ||
21140 | "\x70\x19\xd0\x4c\x45\x78\xd6\x68\xa9\x9a\xaa\xfe" | ||
21141 | "\xc1\xdf\x27\x9a\x1c\x0d\x0d\xf7\x24\x75\x46\xcc" | ||
21142 | "\x77\x6b\xdf\x89\xc6\x94\xdc\x74\x50\x10\x70\x18" | ||
21143 | "\x9b\xdc\x96\xb4\x89\x23\x40\x1a\xce\x09\x87\xce" | ||
21144 | "\xd2\xf3\xd5\xe4\x51\x67\x74\x11\x5a\xcc\x8b\x3b" | ||
21145 | "\x8a\xf1\x23\xa8", | ||
21146 | .expectedlen = 64, | ||
21147 | .addtla = NULL, | ||
21148 | .addtlb = NULL, | ||
21149 | .addtllen = 0, | ||
21150 | .pers = NULL, | ||
21151 | .perslen = 0, | ||
21152 | }, { | ||
21153 | .entropy = (unsigned char *) | ||
21154 | "\x8e\x83\xe0\xeb\x37\xea\x3e\x53\x5e\x17\x6e\x77" | ||
21155 | "\xbd\xb1\x53\x90\xfc\xdc\xc1\x3c\x9a\x88\x22\x94", | ||
21156 | .entropylen = 24, | ||
21157 | .entpra = (unsigned char *) | ||
21158 | "\x6a\x85\xe7\x37\xc8\xf1\x04\x31\x98\x4f\xc8\x73" | ||
21159 | "\x67\xd1\x08\xf8", | ||
21160 | .entprb = (unsigned char *) | ||
21161 | "\xd7\xa4\x68\xe2\x12\x74\xc3\xd9\xf1\xb7\x05\xbc" | ||
21162 | "\xd4\xba\x04\x58", | ||
21163 | .entprlen = 16, | ||
21164 | .expected = (unsigned char *) | ||
21165 | "\x78\xd6\xa6\x70\xff\xd1\x82\xf5\xa2\x88\x7f\x6d" | ||
21166 | "\x3d\x8c\x39\xb1\xa8\xcb\x2c\x91\xab\x14\x7e\xbc" | ||
21167 | "\x95\x45\x9f\x24\xb8\x20\xac\x21\x23\xdb\x72\xd7" | ||
21168 | "\x12\x8d\x48\x95\xf3\x19\x0c\x43\xc6\x19\x45\xfc" | ||
21169 | "\x8b\xac\x40\x29\x73\x00\x03\x45\x5e\x12\xff\x0c" | ||
21170 | "\xc1\x02\x41\x82", | ||
21171 | .expectedlen = 64, | ||
21172 | .addtla = (unsigned char *) | ||
21173 | "\xa2\xd9\x38\xcf\x8b\x29\x67\x5b\x65\x62\x6f\xe8" | ||
21174 | "\xeb\xb3\x01\x76", | ||
21175 | .addtlb = (unsigned char *) | ||
21176 | "\x59\x63\x1e\x81\x8a\x14\xa8\xbb\xa1\xb8\x41\x25" | ||
21177 | "\xd0\x7f\xcc\x43", | ||
21178 | .addtllen = 16, | ||
21179 | .pers = NULL, | ||
21180 | .perslen = 0, | ||
21181 | }, { | ||
21182 | .entropy = (unsigned char *) | ||
21183 | "\x04\xd9\x49\xa6\xdc\xe8\x6e\xbb\xf1\x08\x77\x2b" | ||
21184 | "\x9e\x08\xca\x92\x65\x16\xda\x99\xa2\x59\xf3\xe8", | ||
21185 | .entropylen = 24, | ||
21186 | .entpra = (unsigned char *) | ||
21187 | "\x38\x7e\x3f\x6b\x51\x70\x7b\x20\xec\x53\xd0\x66" | ||
21188 | "\xc3\x0f\xe3\xb0", | ||
21189 | .entprb = (unsigned char *) | ||
21190 | "\xe0\x86\xa6\xaa\x5f\x72\x2f\xad\xf7\xef\x06\xb8" | ||
21191 | "\xd6\x9c\x9d\xe8", | ||
21192 | .entprlen = 16, | ||
21193 | .expected = (unsigned char *) | ||
21194 | "\xc9\x0a\xaf\x85\x89\x71\x44\x66\x4f\x25\x0b\x2b" | ||
21195 | "\xde\xd8\xfa\xff\x52\x5a\x1b\x32\x5e\x41\x7a\x10" | ||
21196 | "\x1f\xef\x1e\x62\x23\xe9\x20\x30\xc9\x0d\xad\x69" | ||
21197 | "\xb4\x9c\x5b\xf4\x87\x42\xd5\xae\x5e\x5e\x43\xcc" | ||
21198 | "\xd9\xfd\x0b\x93\x4a\xe3\xd4\x06\x37\x36\x0f\x3f" | ||
21199 | "\x72\x82\x0c\xcf", | ||
21200 | .expectedlen = 64, | ||
21201 | .addtla = NULL, | ||
21202 | .addtlb = NULL, | ||
21203 | .addtllen = 0, | ||
21204 | .pers = (unsigned char *) | ||
21205 | "\xbf\xa4\x9a\x8f\x7b\xd8\xb1\x7a\x9d\xfa\x45\xed" | ||
21206 | "\x21\x52\xb3\xad", | ||
21207 | .perslen = 16, | ||
21208 | }, { | ||
21209 | .entropy = (unsigned char *) | ||
21210 | "\x92\x89\x8f\x31\xfa\x1c\xff\x6d\x18\x2f\x26\x06" | ||
21211 | "\x43\xdf\xf8\x18\xc2\xa4\xd9\x72\xc3\xb9\xb6\x97", | ||
21212 | .entropylen = 24, | ||
21213 | .entpra = (unsigned char *) | ||
21214 | "\x20\x72\x8a\x06\xf8\x6f\x8d\xd4\x41\xe2\x72\xb7" | ||
21215 | "\xc4\x2c\xe8\x10", | ||
21216 | .entprb = (unsigned char *) | ||
21217 | "\x3d\xb0\xf0\x94\xf3\x05\x50\x33\x17\x86\x3e\x22" | ||
21218 | "\x08\xf7\xa5\x01", | ||
21219 | .entprlen = 16, | ||
21220 | .expected = (unsigned char *) | ||
21221 | "\x5a\x35\x39\x87\x0f\x4d\x22\xa4\x09\x24\xee\x71" | ||
21222 | "\xc9\x6f\xac\x72\x0a\xd6\xf0\x88\x82\xd0\x83\x28" | ||
21223 | "\x73\xec\x3f\x93\xd8\xab\x45\x23\xf0\x7e\xac\x45" | ||
21224 | "\x14\x5e\x93\x9f\xb1\xd6\x76\x43\x3d\xb6\xe8\x08" | ||
21225 | "\x88\xf6\xda\x89\x08\x77\x42\xfe\x1a\xf4\x3f\xc4" | ||
21226 | "\x23\xc5\x1f\x68", | ||
21227 | .expectedlen = 64, | ||
21228 | .addtla = (unsigned char *) | ||
21229 | "\x1a\x40\xfa\xe3\xcc\x6c\x7c\xa0\xf8\xda\xba\x59" | ||
21230 | "\x23\x6d\xad\x1d", | ||
21231 | .addtlb = (unsigned char *) | ||
21232 | "\x9f\x72\x76\x6c\xc7\x46\xe5\xed\x2e\x53\x20\x12" | ||
21233 | "\xbc\x59\x31\x8c", | ||
21234 | .addtllen = 16, | ||
21235 | .pers = (unsigned char *) | ||
21236 | "\xea\x65\xee\x60\x26\x4e\x7e\xb6\x0e\x82\x68\xc4" | ||
21237 | "\x37\x3c\x5c\x0b", | ||
21238 | .perslen = 16, | ||
21239 | }, | ||
21240 | }; | ||
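For orientation before the no-PR templates below: in the prediction-resistance procedure these *_pr_* vectors encode, the DRBG is seeded once from .entropy (plus .pers when present), and each of the two generate calls first pulls fresh entropy from .entpra and .entprb respectively; only the second output block is compared against .expected. A minimal sketch of that flow under those assumptions — the drbg_ops callbacks are hypothetical stand-ins, not the in-tree harness, which wires the per-request entropy in differently:

	#include <stddef.h>
	#include <string.h>

	struct drbg_tv {                      /* abbreviated drbg_testvec layout */
		const unsigned char *entropy;  size_t entropylen;
		const unsigned char *entpra, *entprb; size_t entprlen;
		const unsigned char *addtla, *addtlb; size_t addtllen;
		const unsigned char *pers;     size_t perslen;
		const unsigned char *expected; size_t expectedlen;
	};

	struct drbg_ops {                     /* hypothetical DRBG backend */
		int (*instantiate)(void *st, const unsigned char *ent, size_t elen,
				   const unsigned char *pers, size_t plen);
		int (*reseed)(void *st, const unsigned char *ent, size_t elen);
		int (*generate)(void *st, unsigned char *out, size_t olen,
				const unsigned char *addtl, size_t alen);
	};

	static int drbg_pr_check(const struct drbg_ops *o, void *st,
				 const struct drbg_tv *tv)
	{
		unsigned char buf[128];

		if (tv->expectedlen > sizeof(buf) ||
		    o->instantiate(st, tv->entropy, tv->entropylen,
				   tv->pers, tv->perslen))
			return -1;
		/* each generate pulls fresh entropy first */
		if (o->reseed(st, tv->entpra, tv->entprlen) ||
		    o->generate(st, buf, tv->expectedlen, tv->addtla, tv->addtllen))
			return -1;
		if (o->reseed(st, tv->entprb, tv->entprlen) ||
		    o->generate(st, buf, tv->expectedlen, tv->addtlb, tv->addtllen))
			return -1;
		/* only the second output block is compared */
		return memcmp(buf, tv->expected, tv->expectedlen) ? -1 : 0;
	}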
21241 | |||
21242 | /* | ||
21243 | * SP800-90A DRBG Test vectors from | ||
21244 | * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip | ||
21245 | * | ||
21246 | * Test vectors for DRBG without prediction resistance. All types of DRBGs | ||
21247 | * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and | ||
21248 | * w/o personalization string, w/ and w/o additional input string). | ||
21249 | */ | ||
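And the no-PR counterpart the comment above describes: the same single instantiate, but the two generates run back to back with no intervening reseed, and the second output is still the one checked. Reusing the sketch types from the previous snippet:

	static int drbg_nopr_check(const struct drbg_ops *o, void *st,
				   const struct drbg_tv *tv)
	{
		unsigned char buf[128];

		if (tv->expectedlen > sizeof(buf) ||
		    o->instantiate(st, tv->entropy, tv->entropylen,
				   tv->pers, tv->perslen))
			return -1;
		/* two back-to-back generates, no reseed in between */
		if (o->generate(st, buf, tv->expectedlen, tv->addtla, tv->addtllen) ||
		    o->generate(st, buf, tv->expectedlen, tv->addtlb, tv->addtllen))
			return -1;
		return memcmp(buf, tv->expected, tv->expectedlen) ? -1 : 0;
	}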
21250 | static struct drbg_testvec drbg_nopr_sha256_tv_template[] = { | ||
21251 | { | ||
21252 | .entropy = (unsigned char *) | ||
21253 | "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3" | ||
21254 | "\xa2\xe7\x1f\x42\xc7\x12\x9d\x62\x0f\xf5\xc1\x19" | ||
21255 | "\xa9\xef\x55\xf0\x51\x85\xe0\xfb\x85\x81\xf9\x31" | ||
21256 | "\x75\x17\x27\x6e\x06\xe9\x60\x7d\xdb\xcb\xcc\x2e", | ||
21257 | .entropylen = 48, | ||
21258 | .expected = (unsigned char *) | ||
21259 | "\xd3\xe1\x60\xc3\x5b\x99\xf3\x40\xb2\x62\x82\x64" | ||
21260 | "\xd1\x75\x10\x60\xe0\x04\x5d\xa3\x83\xff\x57\xa5" | ||
21261 | "\x7d\x73\xa6\x73\xd2\xb8\xd8\x0d\xaa\xf6\xa6\xc3" | ||
21262 | "\x5a\x91\xbb\x45\x79\xd7\x3f\xd0\xc8\xfe\xd1\x11" | ||
21263 | "\xb0\x39\x13\x06\x82\x8a\xdf\xed\x52\x8f\x01\x81" | ||
21264 | "\x21\xb3\xfe\xbd\xc3\x43\xe7\x97\xb8\x7d\xbb\x63" | ||
21265 | "\xdb\x13\x33\xde\xd9\xd1\xec\xe1\x77\xcf\xa6\xb7" | ||
21266 | "\x1f\xe8\xab\x1d\xa4\x66\x24\xed\x64\x15\xe5\x1c" | ||
21267 | "\xcd\xe2\xc7\xca\x86\xe2\x83\x99\x0e\xea\xeb\x91" | ||
21268 | "\x12\x04\x15\x52\x8b\x22\x95\x91\x02\x81\xb0\x2d" | ||
21269 | "\xd4\x31\xf4\xc9\xf7\x04\x27\xdf", | ||
21270 | .expectedlen = 128, | ||
21271 | .addtla = NULL, | ||
21272 | .addtlb = NULL, | ||
21273 | .addtllen = 0, | ||
21274 | .pers = NULL, | ||
21275 | .perslen = 0, | ||
21276 | }, { | ||
21277 | .entropy = (unsigned char *) | ||
21278 | "\x73\xd3\xfb\xa3\x94\x5f\x2b\x5f\xb9\x8f\xf6\x9c" | ||
21279 | "\x8a\x93\x17\xae\x19\xc3\x4c\xc3\xd6\xca\xa3\x2d" | ||
21280 | "\x16\xfc\x42\xd2\x2d\xd5\x6f\x56\xcc\x1d\x30\xff" | ||
21281 | "\x9e\x06\x3e\x09\xce\x58\xe6\x9a\x35\xb3\xa6\x56", | ||
21282 | .entropylen = 48, | ||
21283 | .expected = (unsigned char *) | ||
21284 | "\x71\x7b\x93\x46\x1a\x40\xaa\x35\xa4\xaa\xc5\xe7" | ||
21285 | "\x6d\x5b\x5b\x8a\xa0\xdf\x39\x7d\xae\x71\x58\x5b" | ||
21286 | "\x3c\x7c\xb4\xf0\x89\xfa\x4a\x8c\xa9\x5c\x54\xc0" | ||
21287 | "\x40\xdf\xbc\xce\x26\x81\x34\xf8\xba\x7d\x1c\xe8" | ||
21288 | "\xad\x21\xe0\x74\xcf\x48\x84\x30\x1f\xa1\xd5\x4f" | ||
21289 | "\x81\x42\x2f\xf4\xdb\x0b\x23\xf8\x73\x27\xb8\x1d" | ||
21290 | "\x42\xf8\x44\x58\xd8\x5b\x29\x27\x0a\xf8\x69\x59" | ||
21291 | "\xb5\x78\x44\xeb\x9e\xe0\x68\x6f\x42\x9a\xb0\x5b" | ||
21292 | "\xe0\x4e\xcb\x6a\xaa\xe2\xd2\xd5\x33\x25\x3e\xe0" | ||
21293 | "\x6c\xc7\x6a\x07\xa5\x03\x83\x9f\xe2\x8b\xd1\x1c" | ||
21294 | "\x70\xa8\x07\x59\x97\xeb\xf6\xbe", | ||
21295 | .expectedlen = 128, | ||
21296 | .addtla = (unsigned char *) | ||
21297 | "\xf4\xd5\x98\x3d\xa8\xfc\xfa\x37\xb7\x54\x67\x73" | ||
21298 | "\xc7\xc3\xdd\x47\x34\x71\x02\x5d\xc1\xa0\xd3\x10" | ||
21299 | "\xc1\x8b\xbd\xf5\x66\x34\x6f\xdd", | ||
21300 | .addtlb = (unsigned char *) | ||
21301 | "\xf7\x9e\x6a\x56\x0e\x73\xe9\xd9\x7a\xd1\x69\xe0" | ||
21302 | "\x6f\x8c\x55\x1c\x44\xd1\xce\x6f\x28\xcc\xa4\x4d" | ||
21303 | "\xa8\xc0\x85\xd1\x5a\x0c\x59\x40", | ||
21304 | .addtllen = 32, | ||
21305 | .pers = NULL, | ||
21306 | .perslen = 0, | ||
21307 | }, { | ||
21308 | .entropy = (unsigned char *) | ||
21309 | "\x2a\x85\xa9\x8b\xd0\xda\x83\xd6\xad\xab\x9f\xbb" | ||
21310 | "\x54\x31\x15\x95\x1c\x4d\x49\x9f\x6a\x15\xf6\xe4" | ||
21311 | "\x15\x50\x88\x06\x29\x0d\xed\x8d\xb9\x6f\x96\xe1" | ||
21312 | "\x83\x9f\xf7\x88\xda\x84\xbf\x44\x28\xd9\x1d\xaa", | ||
21313 | .entropylen = 48, | ||
21314 | .expected = (unsigned char *) | ||
21315 | "\x2d\x55\xde\xc9\xed\x05\x47\x07\x3d\x04\xfc\x28" | ||
21316 | "\x0f\x92\xf0\x4d\xd8\x00\x32\x47\x0a\x1b\x1c\x4b" | ||
21317 | "\xef\xd9\x97\xa1\x17\x67\xda\x26\x6c\xfe\x76\x46" | ||
21318 | "\x6f\xbc\x6d\x82\x4e\x83\x8a\x98\x66\x6c\x01\xb6" | ||
21319 | "\xe6\x64\xe0\x08\x10\x6f\xd3\x5d\x90\xe7\x0d\x72" | ||
21320 | "\xa6\xa7\xe3\xbb\x98\x11\x12\x56\x23\xc2\x6d\xd1" | ||
21321 | "\xc8\xa8\x7a\x39\xf3\x34\xe3\xb8\xf8\x66\x00\x77" | ||
21322 | "\x7d\xcf\x3c\x3e\xfa\xc9\x0f\xaf\xe0\x24\xfa\xe9" | ||
21323 | "\x84\xf9\x6a\x01\xf6\x35\xdb\x5c\xab\x2a\xef\x4e" | ||
21324 | "\xac\xab\x55\xb8\x9b\xef\x98\x68\xaf\x51\xd8\x16" | ||
21325 | "\xa5\x5e\xae\xf9\x1e\xd2\xdb\xe6", | ||
21326 | .expectedlen = 128, | ||
21327 | .addtla = NULL, | ||
21328 | .addtlb = NULL, | ||
21329 | .addtllen = 0, | ||
21330 | .pers = (unsigned char *) | ||
21331 | "\xa8\x80\xec\x98\x30\x98\x15\xd2\xc6\xc4\x68\xf1" | ||
21332 | "\x3a\x1c\xbf\xce\x6a\x40\x14\xeb\x36\x99\x53\xda" | ||
21333 | "\x57\x6b\xce\xa4\x1c\x66\x3d\xbc", | ||
21334 | .perslen = 32, | ||
21335 | }, { | ||
21336 | .entropy = (unsigned char *) | ||
21337 | "\x69\xed\x82\xa9\xc5\x7b\xbf\xe5\x1d\x2f\xcb\x7a" | ||
21338 | "\xd3\x50\x7d\x96\xb4\xb9\x2b\x50\x77\x51\x27\x74" | ||
21339 | "\x33\x74\xba\xf1\x30\xdf\x8e\xdf\x87\x1d\x87\xbc" | ||
21340 | "\x96\xb2\xc3\xa7\xed\x60\x5e\x61\x4e\x51\x29\x1a", | ||
21341 | .entropylen = 48, | ||
21342 | .expected = (unsigned char *) | ||
21343 | "\xa5\x71\x24\x31\x11\xfe\x13\xe1\xa8\x24\x12\xfb" | ||
21344 | "\x37\xa1\x27\xa5\xab\x77\xa1\x9f\xae\x8f\xaf\x13" | ||
21345 | "\x93\xf7\x53\x85\x91\xb6\x1b\xab\xd4\x6b\xea\xb6" | ||
21346 | "\xef\xda\x4c\x90\x6e\xef\x5f\xde\xe1\xc7\x10\x36" | ||
21347 | "\xd5\x67\xbd\x14\xb6\x89\x21\x0c\xc9\x92\x65\x64" | ||
21348 | "\xd0\xf3\x23\xe0\x7f\xd1\xe8\x75\xc2\x85\x06\xea" | ||
21349 | "\xca\xc0\xcb\x79\x2d\x29\x82\xfc\xaa\x9a\xc6\x95" | ||
21350 | "\x7e\xdc\x88\x65\xba\xec\x0e\x16\x87\xec\xa3\x9e" | ||
21351 | "\xd8\x8c\x80\xab\x3a\x64\xe0\xcb\x0e\x45\x98\xdd" | ||
21352 | "\x7c\x6c\x6c\x26\x11\x13\xc8\xce\xa9\x47\xa6\x06" | ||
21353 | "\x57\xa2\x66\xbb\x2d\x7f\xf3\xc1", | ||
21354 | .expectedlen = 128, | ||
21355 | .addtla = (unsigned char *) | ||
21356 | "\x74\xd3\x6d\xda\xe8\xd6\x86\x5f\x63\x01\xfd\xf2" | ||
21357 | "\x7d\x06\x29\x6d\x94\xd1\x66\xf0\xd2\x72\x67\x4e" | ||
21358 | "\x77\xc5\x3d\x9e\x03\xe3\xa5\x78", | ||
21359 | .addtlb = (unsigned char *) | ||
21360 | "\xf6\xb6\x3d\xf0\x7c\x26\x04\xc5\x8b\xcd\x3e\x6a" | ||
21361 | "\x9f\x9c\x3a\x2e\xdb\x47\x87\xe5\x8e\x00\x5e\x2b" | ||
21362 | "\x74\x7f\xa6\xf6\x80\xcd\x9b\x21", | ||
21363 | .addtllen = 32, | ||
21364 | .pers = (unsigned char *) | ||
21365 | "\x74\xa6\xe0\x08\xf9\x27\xee\x1d\x6e\x3c\x28\x20" | ||
21366 | "\x87\xdd\xd7\x54\x31\x47\x78\x4b\xe5\x6d\xa3\x73" | ||
21367 | "\xa9\x65\xb1\x10\xc1\xdc\x77\x7c", | ||
21368 | .perslen = 32, | ||
21369 | }, | ||
21370 | }; | ||
21371 | |||
21372 | static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = { | ||
21373 | { | ||
21374 | .entropy = (unsigned char *) | ||
21375 | "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c" | ||
21376 | "\xbd\xc4\x6e\x68\x31\xe4\x4d\x34\xa4\xfb\x93\x5e" | ||
21377 | "\xe2\x85\xdd\x14\xb7\x1a\x74\x88\x65\x9b\xa9\x6c" | ||
21378 | "\x60\x1d\xc6\x9f\xc9\x02\x94\x08\x05\xec\x0c\xa8", | ||
21379 | .entropylen = 48, | ||
21380 | .expected = (unsigned char *) | ||
21381 | "\xe5\x28\xe9\xab\xf2\xde\xce\x54\xd4\x7c\x7e\x75" | ||
21382 | "\xe5\xfe\x30\x21\x49\xf8\x17\xea\x9f\xb4\xbe\xe6" | ||
21383 | "\xf4\x19\x96\x97\xd0\x4d\x5b\x89\xd5\x4f\xbb\x97" | ||
21384 | "\x8a\x15\xb5\xc4\x43\xc9\xec\x21\x03\x6d\x24\x60" | ||
21385 | "\xb6\xf7\x3e\xba\xd0\xdc\x2a\xba\x6e\x62\x4a\xbf" | ||
21386 | "\x07\x74\x5b\xc1\x07\x69\x4b\xb7\x54\x7b\xb0\x99" | ||
21387 | "\x5f\x70\xde\x25\xd6\xb2\x9e\x2d\x30\x11\xbb\x19" | ||
21388 | "\xd2\x76\x76\xc0\x71\x62\xc8\xb5\xcc\xde\x06\x68" | ||
21389 | "\x96\x1d\xf8\x68\x03\x48\x2c\xb3\x7e\xd6\xd5\xc0" | ||
21390 | "\xbb\x8d\x50\xcf\x1f\x50\xd4\x76\xaa\x04\x58\xbd" | ||
21391 | "\xab\xa8\x06\xf4\x8b\xe9\xdc\xb8", | ||
21392 | .expectedlen = 128, | ||
21393 | .addtla = NULL, | ||
21394 | .addtlb = NULL, | ||
21395 | .addtllen = 0, | ||
21396 | .pers = NULL, | ||
21397 | .perslen = 0, | ||
21398 | }, { | ||
21399 | .entropy = (unsigned char *) | ||
21400 | "\xf9\x7a\x3c\xfd\x91\xfa\xa0\x46\xb9\xe6\x1b\x94" | ||
21401 | "\x93\xd4\x36\xc4\x93\x1f\x60\x4b\x22\xf1\x08\x15" | ||
21402 | "\x21\xb3\x41\x91\x51\xe8\xff\x06\x11\xf3\xa7\xd4" | ||
21403 | "\x35\x95\x35\x7d\x58\x12\x0b\xd1\xe2\xdd\x8a\xed", | ||
21404 | .entropylen = 48, | ||
21405 | .expected = (unsigned char *) | ||
21406 | "\xc6\x87\x1c\xff\x08\x24\xfe\x55\xea\x76\x89\xa5" | ||
21407 | "\x22\x29\x88\x67\x30\x45\x0e\x5d\x36\x2d\xa5\xbf" | ||
21408 | "\x59\x0d\xcf\x9a\xcd\x67\xfe\xd4\xcb\x32\x10\x7d" | ||
21409 | "\xf5\xd0\x39\x69\xa6\x6b\x1f\x64\x94\xfd\xf5\xd6" | ||
21410 | "\x3d\x5b\x4d\x0d\x34\xea\x73\x99\xa0\x7d\x01\x16" | ||
21411 | "\x12\x6d\x0d\x51\x8c\x7c\x55\xba\x46\xe1\x2f\x62" | ||
21412 | "\xef\xc8\xfe\x28\xa5\x1c\x9d\x42\x8e\x6d\x37\x1d" | ||
21413 | "\x73\x97\xab\x31\x9f\xc7\x3d\xed\x47\x22\xe5\xb4" | ||
21414 | "\xf3\x00\x04\x03\x2a\x61\x28\xdf\x5e\x74\x97\xec" | ||
21415 | "\xf8\x2c\xa7\xb0\xa5\x0e\x86\x7e\xf6\x72\x8a\x4f" | ||
21416 | "\x50\x9a\x8c\x85\x90\x87\x03\x9c", | ||
21417 | .expectedlen = 128, | ||
21418 | .addtla = (unsigned char *) | ||
21419 | "\x51\x72\x89\xaf\xe4\x44\xa0\xfe\x5e\xd1\xa4\x1d" | ||
21420 | "\xbb\xb5\xeb\x17\x15\x00\x79\xbd\xd3\x1e\x29\xcf" | ||
21421 | "\x2f\xf3\x00\x34\xd8\x26\x8e\x3b", | ||
21422 | .addtlb = (unsigned char *) | ||
21423 | "\x88\x02\x8d\x29\xef\x80\xb4\xe6\xf0\xfe\x12\xf9" | ||
21424 | "\x1d\x74\x49\xfe\x75\x06\x26\x82\xe8\x9c\x57\x14" | ||
21425 | "\x40\xc0\xc9\xb5\x2c\x42\xa6\xe0", | ||
21426 | .addtllen = 32, | ||
21427 | .pers = NULL, | ||
21428 | .perslen = 0, | ||
21429 | }, { | ||
21430 | .entropy = (unsigned char *) | ||
21431 | "\x8d\xf0\x13\xb4\xd1\x03\x52\x30\x73\x91\x7d\xdf" | ||
21432 | "\x6a\x86\x97\x93\x05\x9e\x99\x43\xfc\x86\x54\x54" | ||
21433 | "\x9e\x7a\xb2\x2f\x7c\x29\xf1\x22\xda\x26\x25\xaf" | ||
21434 | "\x2d\xdd\x4a\xbc\xce\x3c\xf4\xfa\x46\x59\xd8\x4e", | ||
21435 | .entropylen = 48, | ||
21436 | .expected = (unsigned char *) | ||
21437 | "\xb9\x1c\xba\x4c\xc8\x4f\xa2\x5d\xf8\x61\x0b\x81" | ||
21438 | "\xb6\x41\x40\x27\x68\xa2\x09\x72\x34\x93\x2e\x37" | ||
21439 | "\xd5\x90\xb1\x15\x4c\xbd\x23\xf9\x74\x52\xe3\x10" | ||
21440 | "\xe2\x91\xc4\x51\x46\x14\x7f\x0d\xa2\xd8\x17\x61" | ||
21441 | "\xfe\x90\xfb\xa6\x4f\x94\x41\x9c\x0f\x66\x2b\x28" | ||
21442 | "\xc1\xed\x94\xda\x48\x7b\xb7\xe7\x3e\xec\x79\x8f" | ||
21443 | "\xbc\xf9\x81\xb7\x91\xd1\xbe\x4f\x17\x7a\x89\x07" | ||
21444 | "\xaa\x3c\x40\x16\x43\xa5\xb6\x2b\x87\xb8\x9d\x66" | ||
21445 | "\xb3\xa6\x0e\x40\xd4\xa8\xe4\xe9\xd8\x2a\xf6\xd2" | ||
21446 | "\x70\x0e\x6f\x53\x5c\xdb\x51\xf7\x5c\x32\x17\x29" | ||
21447 | "\x10\x37\x41\x03\x0c\xcc\x3a\x56", | ||
21448 | .expectedlen = 128, | ||
21449 | .addtla = NULL, | ||
21450 | .addtlb = NULL, | ||
21451 | .addtllen = 0, | ||
21452 | .pers = (unsigned char *) | ||
21453 | "\xb5\x71\xe6\x6d\x7c\x33\x8b\xc0\x7b\x76\xad\x37" | ||
21454 | "\x57\xbb\x2f\x94\x52\xbf\x7e\x07\x43\x7a\xe8\x58" | ||
21455 | "\x1c\xe7\xbc\x7c\x3a\xc6\x51\xa9", | ||
21456 | .perslen = 32, | ||
21457 | }, { | ||
21458 | .entropy = (unsigned char *) | ||
21459 | "\xc2\xa5\x66\xa9\xa1\x81\x7b\x15\xc5\xc3\xb7\x78" | ||
21460 | "\x17\x7a\xc8\x7c\x24\xe7\x97\xbe\x0a\x84\x5f\x11" | ||
21461 | "\xc2\xfe\x39\x9d\xd3\x77\x32\xf2\xcb\x18\x94\xeb" | ||
21462 | "\x2b\x97\xb3\xc5\x6e\x62\x83\x29\x51\x6f\x86\xec", | ||
21463 | .entropylen = 48, | ||
21464 | .expected = (unsigned char *) | ||
21465 | "\xb3\xa3\x69\x8d\x77\x76\x99\xa0\xdd\x9f\xa3\xf0" | ||
21466 | "\xa9\xfa\x57\x83\x2d\x3c\xef\xac\x5d\xf2\x44\x37" | ||
21467 | "\xc6\xd7\x3a\x0f\xe4\x10\x40\xf1\x72\x90\x38\xae" | ||
21468 | "\xf1\xe9\x26\x35\x2e\xa5\x9d\xe1\x20\xbf\xb7\xb0" | ||
21469 | "\x73\x18\x3a\x34\x10\x6e\xfe\xd6\x27\x8f\xf8\xad" | ||
21470 | "\x84\x4b\xa0\x44\x81\x15\xdf\xdd\xf3\x31\x9a\x82" | ||
21471 | "\xde\x6b\xb1\x1d\x80\xbd\x87\x1a\x9a\xcd\x35\xc7" | ||
21472 | "\x36\x45\xe1\x27\x0f\xb9\xfe\x4f\xa8\x8e\xc0\xe4" | ||
21473 | "\x65\x40\x9e\xa0\xcb\xa8\x09\xfe\x2f\x45\xe0\x49" | ||
21474 | "\x43\xa2\xe3\x96\xbb\xb7\xdd\x2f\x4e\x07\x95\x30" | ||
21475 | "\x35\x24\xcc\x9c\xc5\xea\x54\xa1", | ||
21476 | .expectedlen = 128, | ||
21477 | .addtla = (unsigned char *) | ||
21478 | "\x41\x3d\xd8\x3f\xe5\x68\x35\xab\xd4\x78\xcb\x96" | ||
21479 | "\x93\xd6\x76\x35\x90\x1c\x40\x23\x9a\x26\x64\x62" | ||
21480 | "\xd3\x13\x3b\x83\xe4\x9c\x82\x0b", | ||
21481 | .addtlb = (unsigned char *) | ||
21482 | "\xd5\xc4\xa7\x1f\x9d\x6d\x95\xa1\xbe\xdf\x0b\xd2" | ||
21483 | "\x24\x7c\x27\x7d\x1f\x84\xa4\xe5\x7a\x4a\x88\x25" | ||
21484 | "\xb8\x2a\x2d\x09\x7d\xe6\x3e\xf1", | ||
21485 | .addtllen = 32, | ||
21486 | .pers = (unsigned char *) | ||
21487 | "\x13\xce\x4d\x8d\xd2\xdb\x97\x96\xf9\x41\x56\xc8" | ||
21488 | "\xe8\xf0\x76\x9b\x0a\xa1\xc8\x2c\x13\x23\xb6\x15" | ||
21489 | "\x36\x60\x3b\xca\x37\xc9\xee\x29", | ||
21490 | .perslen = 32, | ||
21491 | }, | ||
21492 | }; | ||
21493 | |||
21494 | static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = { | ||
21495 | { | ||
21496 | .entropy = (unsigned char *) | ||
21497 | "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9" | ||
21498 | "\x6c\x95\xb8\xf1\xc9\xa8\xf9\xcb\x24\x5a\x8b\x40" | ||
21499 | "\xf3\xa6\xe5\xa7\xfb\xd9\xd3\xc6\x8e\x27\x7b\xa9" | ||
21500 | "\xac\x9b\xbb\x00", | ||
21501 | .entropylen = 40, | ||
21502 | .expected = (unsigned char *) | ||
21503 | "\x8c\x2e\x72\xab\xfd\x9b\xb8\x28\x4d\xb7\x9e\x17" | ||
21504 | "\xa4\x3a\x31\x46\xcd\x76\x94\xe3\x52\x49\xfc\x33" | ||
21505 | "\x83\x91\x4a\x71\x17\xf4\x13\x68\xe6\xd4\xf1\x48" | ||
21506 | "\xff\x49\xbf\x29\x07\x6b\x50\x15\xc5\x9f\x45\x79" | ||
21507 | "\x45\x66\x2e\x3d\x35\x03\x84\x3f\x4a\xa5\xa3\xdf" | ||
21508 | "\x9a\x9d\xf1\x0d", | ||
21509 | .expectedlen = 64, | ||
21510 | .addtla = NULL, | ||
21511 | .addtlb = NULL, | ||
21512 | .addtllen = 0, | ||
21513 | .pers = NULL, | ||
21514 | .perslen = 0, | ||
21515 | }, | ||
21516 | }; | ||
21517 | |||
21518 | static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = { | ||
21519 | { | ||
21520 | .entropy = (unsigned char *) | ||
21521 | "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f" | ||
21522 | "\x21\x1d\x78\xa0\xb9\x38\x9a\x74\xe5\xbc\xcf\xec" | ||
21523 | "\xe8\xd7\x66\xaf\x1a\x6d\x3b\x14\x49\x6f\x25\xb0" | ||
21524 | "\xf1\x30\x1b\x4f\x50\x1b\xe3\x03\x80\xa1\x37\xeb", | ||
21525 | .entropylen = 48, | ||
21526 | .expected = (unsigned char *) | ||
21527 | "\x58\x62\xeb\x38\xbd\x55\x8d\xd9\x78\xa6\x96\xe6" | ||
21528 | "\xdf\x16\x47\x82\xdd\xd8\x87\xe7\xe9\xa6\xc9\xf3" | ||
21529 | "\xf1\xfb\xaf\xb7\x89\x41\xb5\x35\xa6\x49\x12\xdf" | ||
21530 | "\xd2\x24\xc6\xdc\x74\x54\xe5\x25\x0b\x3d\x97\x16" | ||
21531 | "\x5e\x16\x26\x0c\x2f\xaf\x1c\xc7\x73\x5c\xb7\x5f" | ||
21532 | "\xb4\xf0\x7e\x1d", | ||
21533 | .expectedlen = 64, | ||
21534 | .addtla = NULL, | ||
21535 | .addtlb = NULL, | ||
21536 | .addtllen = 0, | ||
21537 | .pers = NULL, | ||
21538 | .perslen = 0, | ||
21539 | }, | ||
21540 | }; | ||
21541 | |||
21542 | static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = { | ||
21543 | { | ||
21544 | .entropy = (unsigned char *) | ||
21545 | "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8" | ||
21546 | "\x64\xbf\xf2\x64\xa3\x9e\x98\xdb\x6c\x10\x78\x7f", | ||
21547 | .entropylen = 24, | ||
21548 | .expected = (unsigned char *) | ||
21549 | "\x2c\x14\x7e\x24\x11\x9a\xd8\xd4\xb2\xed\x61\xc1" | ||
21550 | "\x53\xd0\x50\xc9\x24\xff\x59\x75\x15\xf1\x17\x3a" | ||
21551 | "\x3d\xf4\x4b\x2c\x84\x28\xef\x89\x0e\xb9\xde\xf3" | ||
21552 | "\xe4\x78\x04\xb2\xfd\x9b\x35\x7f\xe1\x3f\x8a\x3e" | ||
21553 | "\x10\xc8\x67\x0a\xf9\xdf\x2d\x6c\x96\xfb\xb2\xb8" | ||
21554 | "\xcb\x2d\xd6\xb0", | ||
21555 | .expectedlen = 64, | ||
21556 | .addtla = NULL, | ||
21557 | .addtlb = NULL, | ||
21558 | .addtllen = 0, | ||
21559 | .pers = NULL, | ||
21560 | .perslen = 0, | ||
21561 | }, { | ||
21562 | .entropy = (unsigned char *) | ||
21563 | "\x71\xbd\xce\x35\x42\x7d\x20\xbf\x58\xcf\x17\x74" | ||
21564 | "\xce\x72\xd8\x33\x34\x50\x2d\x8f\x5b\x14\xc4\xdd", | ||
21565 | .entropylen = 24, | ||
21566 | .expected = (unsigned char *) | ||
21567 | "\x97\x33\xe8\x20\x12\xe2\x7b\xa1\x46\x8f\xf2\x34" | ||
21568 | "\xb3\xc9\xb6\x6b\x20\xb2\x4f\xee\x27\xd8\x0b\x21" | ||
21569 | "\x8c\xff\x63\x73\x69\x29\xfb\xf3\x85\xcd\x88\x8e" | ||
21570 | "\x43\x2c\x71\x8b\xa2\x55\xd2\x0f\x1d\x7f\xe3\xe1" | ||
21571 | "\x2a\xa3\xe9\x2c\x25\x89\xc7\x14\x52\x99\x56\xcc" | ||
21572 | "\xc3\xdf\xb3\x81", | ||
21573 | .expectedlen = 64, | ||
21574 | .addtla = (unsigned char *) | ||
21575 | "\x66\xef\x42\xd6\x9a\x8c\x3d\x6d\x4a\x9e\x95\xa6" | ||
21576 | "\x91\x4d\x81\x56", | ||
21577 | .addtlb = (unsigned char *) | ||
21578 | "\xe3\x18\x83\xd9\x4b\x5e\xc4\xcc\xaa\x61\x2f\xbb" | ||
21579 | "\x4a\x55\xd1\xc6", | ||
21580 | .addtllen = 16, | ||
21581 | .pers = NULL, | ||
21582 | .perslen = 0, | ||
21583 | }, { | ||
21584 | .entropy = (unsigned char *) | ||
21585 | "\xca\x4b\x1e\xfa\x75\xbd\x69\x36\x38\x73\xb8\xf9" | ||
21586 | "\xdb\x4d\x35\x0e\x47\xbf\x6c\x37\x72\xfd\xf7\xa9", | ||
21587 | .entropylen = 24, | ||
21588 | .expected = (unsigned char *) | ||
21589 | "\x59\xc3\x19\x79\x1b\xb1\xf3\x0e\xe9\x34\xae\x6e" | ||
21590 | "\x8b\x1f\xad\x1f\x74\xca\x25\x45\x68\xb8\x7f\x75" | ||
21591 | "\x12\xf8\xf2\xab\x4c\x23\x01\x03\x05\xe1\x70\xee" | ||
21592 | "\x75\xd8\xcb\xeb\x23\x4c\x7a\x23\x6e\x12\x27\xdb" | ||
21593 | "\x6f\x7a\xac\x3c\x44\xb7\x87\x4b\x65\x56\x74\x45" | ||
21594 | "\x34\x30\x0c\x3d", | ||
21595 | .expectedlen = 64, | ||
21596 | .addtla = NULL, | ||
21597 | .addtlb = NULL, | ||
21598 | .addtllen = 0, | ||
21599 | .pers = (unsigned char *) | ||
21600 | "\xeb\xaa\x60\x2c\x4d\xbe\x33\xff\x1b\xef\xbf\x0a" | ||
21601 | "\x0b\xc6\x97\x54", | ||
21602 | .perslen = 16, | ||
21603 | }, { | ||
21604 | .entropy = (unsigned char *) | ||
21605 | "\xc0\x70\x1f\x92\x50\x75\x8f\xcd\xf2\xbe\x73\x98" | ||
21606 | "\x80\xdb\x66\xeb\x14\x68\xb4\xa5\x87\x9c\x2d\xa6", | ||
21607 | .entropylen = 24, | ||
21608 | .expected = (unsigned char *) | ||
21609 | "\x97\xc0\xc0\xe5\xa0\xcc\xf2\x4f\x33\x63\x48\x8a" | ||
21610 | "\xdb\x13\x0a\x35\x89\xbf\x80\x65\x62\xee\x13\x95" | ||
21611 | "\x7c\x33\xd3\x7d\xf4\x07\x77\x7a\x2b\x65\x0b\x5f" | ||
21612 | "\x45\x5c\x13\xf1\x90\x77\x7f\xc5\x04\x3f\xcc\x1a" | ||
21613 | "\x38\xf8\xcd\x1b\xbb\xd5\x57\xd1\x4a\x4c\x2e\x8a" | ||
21614 | "\x2b\x49\x1e\x5c", | ||
21615 | .expectedlen = 64, | ||
21616 | .addtla = (unsigned char *) | ||
21617 | "\xf9\x01\xf8\x16\x7a\x1d\xff\xde\x8e\x3c\x83\xe2" | ||
21618 | "\x44\x85\xe7\xfe", | ||
21619 | .addtlb = (unsigned char *) | ||
21620 | "\x17\x1c\x09\x38\xc2\x38\x9f\x97\x87\x60\x55\xb4" | ||
21621 | "\x82\x16\x62\x7f", | ||
21622 | .addtllen = 16, | ||
21623 | .pers = (unsigned char *) | ||
21624 | "\x80\x08\xae\xe8\xe9\x69\x40\xc5\x08\x73\xc7\x9f" | ||
21625 | "\x8e\xcf\xe0\x02", | ||
21626 | .perslen = 16, | ||
21627 | }, | ||
21628 | }; | ||
21629 | |||
20746 | /* Cast5 test vectors from RFC 2144 */ | 21630 | /* Cast5 test vectors from RFC 2144 */ |
20747 | #define CAST5_ENC_TEST_VECTORS 4 | 21631 | #define CAST5_ENC_TEST_VECTORS 4 |
20748 | #define CAST5_DEC_TEST_VECTORS 4 | 21632 | #define CAST5_DEC_TEST_VECTORS 4 |
@@ -20907,8 +21791,8 @@ static struct cipher_testvec cast5_enc_tv_template[] = { | |||
20907 | "\xF5\xBC\x25\xD6\x02\x56\x57\x1C", | 21791 | "\xF5\xBC\x25\xD6\x02\x56\x57\x1C", |
20908 | .rlen = 496, | 21792 | .rlen = 496, |
20909 | .also_non_np = 1, | 21793 | .also_non_np = 1, |
20910 | .np = 2, | 21794 | .np = 3, |
20911 | .tap = { 496 - 16, 16 }, | 21795 | .tap = { 496 - 20, 4, 16 }, |
20912 | }, | 21796 | }, |
20913 | }; | 21797 | }; |
20914 | 21798 | ||
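A note on this and the following .np/.tap hunks: testmgr maps a template's buffer as .np scatterlist chunks whose sizes are listed in .tap and must sum to the buffer length. Going from { 496 - 16, 16 } to { 496 - 20, 4, 16 } leaves the data untouched but inserts a 4-byte middle chunk — smaller than cast5's 8-byte (and camellia's 16-byte) block — so the walk code now also gets exercised on sub-block scatterlist entries. A quick standalone self-check of the new layout:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned int rlen = 496;
		const unsigned int tap[] = { 496 - 20, 4, 16 };	/* .np = 3 */
		unsigned int off = 0;

		for (unsigned int i = 0; i < 3; i++) {
			printf("sg[%u]: offset %u, len %u\n", i, off, tap[i]);
			off += tap[i];
		}
		assert(off == rlen);	/* taps must cover the whole buffer */
		return 0;
	}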
@@ -21068,8 +21952,8 @@ static struct cipher_testvec cast5_dec_tv_template[] = { | |||
21068 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 21952 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
21069 | .rlen = 496, | 21953 | .rlen = 496, |
21070 | .also_non_np = 1, | 21954 | .also_non_np = 1, |
21071 | .np = 2, | 21955 | .np = 3, |
21072 | .tap = { 496 - 16, 16 }, | 21956 | .tap = { 496 - 20, 4, 16 }, |
21073 | }, | 21957 | }, |
21074 | }; | 21958 | }; |
21075 | 21959 | ||
@@ -21206,8 +22090,8 @@ static struct cipher_testvec cast5_cbc_enc_tv_template[] = { | |||
21206 | "\x1D\x18\x66\x44\x5B\x8F\x14\xEB", | 22090 | "\x1D\x18\x66\x44\x5B\x8F\x14\xEB", |
21207 | .rlen = 496, | 22091 | .rlen = 496, |
21208 | .also_non_np = 1, | 22092 | .also_non_np = 1, |
21209 | .np = 2, | 22093 | .np = 3, |
21210 | .tap = { 496 - 16, 16 }, | 22094 | .tap = { 496 - 20, 4, 16 }, |
21211 | }, | 22095 | }, |
21212 | }; | 22096 | }; |
21213 | 22097 | ||
@@ -21344,8 +22228,8 @@ static struct cipher_testvec cast5_cbc_dec_tv_template[] = { | |||
21344 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 22228 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
21345 | .rlen = 496, | 22229 | .rlen = 496, |
21346 | .also_non_np = 1, | 22230 | .also_non_np = 1, |
21347 | .np = 2, | 22231 | .np = 3, |
21348 | .tap = { 496 - 16, 16 }, | 22232 | .tap = { 496 - 20, 4, 16 }, |
21349 | }, | 22233 | }, |
21350 | }; | 22234 | }; |
21351 | 22235 | ||
@@ -21495,8 +22379,8 @@ static struct cipher_testvec cast5_ctr_enc_tv_template[] = { | |||
21495 | "\xC0\x0D\x96\xAA\x23\xF8\xFE\x13", | 22379 | "\xC0\x0D\x96\xAA\x23\xF8\xFE\x13", |
21496 | .rlen = 496, | 22380 | .rlen = 496, |
21497 | .also_non_np = 1, | 22381 | .also_non_np = 1, |
21498 | .np = 2, | 22382 | .np = 3, |
21499 | .tap = { 496 - 16, 16 }, | 22383 | .tap = { 496 - 20, 4, 16 }, |
21500 | }, | 22384 | }, |
21501 | }; | 22385 | }; |
21502 | 22386 | ||
@@ -21646,8 +22530,8 @@ static struct cipher_testvec cast5_ctr_dec_tv_template[] = { | |||
21646 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | 22530 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", |
21647 | .rlen = 496, | 22531 | .rlen = 496, |
21648 | .also_non_np = 1, | 22532 | .also_non_np = 1, |
21649 | .np = 2, | 22533 | .np = 3, |
21650 | .tap = { 496 - 16, 16 }, | 22534 | .tap = { 496 - 20, 4, 16 }, |
21651 | }, | 22535 | }, |
21652 | }; | 22536 | }; |
21653 | 22537 | ||
@@ -22805,8 +23689,8 @@ static struct cipher_testvec camellia_enc_tv_template[] = { | |||
22805 | "\x33\x1A\xBB\xD3\xA2\x7E\x97\x66", | 23689 | "\x33\x1A\xBB\xD3\xA2\x7E\x97\x66", |
22806 | .rlen = 1008, | 23690 | .rlen = 1008, |
22807 | .also_non_np = 1, | 23691 | .also_non_np = 1, |
22808 | .np = 2, | 23692 | .np = 3, |
22809 | .tap = { 1008 - 16, 16 }, | 23693 | .tap = { 1008 - 20, 4, 16 }, |
22810 | }, | 23694 | }, |
22811 | }; | 23695 | }; |
22812 | 23696 | ||
@@ -23105,8 +23989,8 @@ static struct cipher_testvec camellia_dec_tv_template[] = { | |||
23105 | "\x72\x09\xA0\x14\xAB\x42\xD9\x4D", | 23989 | "\x72\x09\xA0\x14\xAB\x42\xD9\x4D", |
23106 | .rlen = 1008, | 23990 | .rlen = 1008, |
23107 | .also_non_np = 1, | 23991 | .also_non_np = 1, |
23108 | .np = 2, | 23992 | .np = 3, |
23109 | .tap = { 1008 - 16, 16 }, | 23993 | .tap = { 1008 - 20, 4, 16 }, |
23110 | }, | 23994 | }, |
23111 | }; | 23995 | }; |
23112 | 23996 | ||
@@ -23401,8 +24285,8 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = { | |||
23401 | "\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C", | 24285 | "\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C", |
23402 | .rlen = 1008, | 24286 | .rlen = 1008, |
23403 | .also_non_np = 1, | 24287 | .also_non_np = 1, |
23404 | .np = 2, | 24288 | .np = 3, |
23405 | .tap = { 1008 - 16, 16 }, | 24289 | .tap = { 1008 - 20, 4, 16 }, |
23406 | }, | 24290 | }, |
23407 | }; | 24291 | }; |
23408 | 24292 | ||
@@ -23697,8 +24581,8 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = { | |||
23697 | "\x72\x09\xA0\x14\xAB\x42\xD9\x4D", | 24581 | "\x72\x09\xA0\x14\xAB\x42\xD9\x4D", |
23698 | .rlen = 1008, | 24582 | .rlen = 1008, |
23699 | .also_non_np = 1, | 24583 | .also_non_np = 1, |
23700 | .np = 2, | 24584 | .np = 3, |
23701 | .tap = { 1008 - 16, 16 }, | 24585 | .tap = { 1008 - 20, 4, 16 }, |
23702 | }, | 24586 | }, |
23703 | }; | 24587 | }; |
23704 | 24588 | ||
@@ -25283,8 +26167,8 @@ static struct cipher_testvec camellia_lrw_enc_tv_template[] = { | |||
25283 | "\x5a\xa8\x92\x7f\xba\xe6\x0c\x95", | 26167 | "\x5a\xa8\x92\x7f\xba\xe6\x0c\x95", |
25284 | .rlen = 512, | 26168 | .rlen = 512, |
25285 | .also_non_np = 1, | 26169 | .also_non_np = 1, |
25286 | .np = 2, | 26170 | .np = 3, |
25287 | .tap = { 512 - 16, 16 }, | 26171 | .tap = { 512 - 20, 4, 16 }, |
25288 | }, | 26172 | }, |
25289 | }; | 26173 | }; |
25290 | 26174 | ||
@@ -25536,8 +26420,8 @@ static struct cipher_testvec camellia_lrw_dec_tv_template[] = { | |||
25536 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", | 26420 | "\x21\xc4\xc2\x75\x67\x89\x37\x0a", |
25537 | .rlen = 512, | 26421 | .rlen = 512, |
25538 | .also_non_np = 1, | 26422 | .also_non_np = 1, |
25539 | .np = 2, | 26423 | .np = 3, |
25540 | .tap = { 512 - 16, 16 }, | 26424 | .tap = { 512 - 20, 4, 16 }, |
25541 | }, | 26425 | }, |
25542 | }; | 26426 | }; |
25543 | 26427 | ||
@@ -25878,8 +26762,8 @@ static struct cipher_testvec camellia_xts_enc_tv_template[] = { | |||
25878 | "\xd5\xc6\x99\xcc\x4e\x6c\x94\x95", | 26762 | "\xd5\xc6\x99\xcc\x4e\x6c\x94\x95", |
25879 | .rlen = 512, | 26763 | .rlen = 512, |
25880 | .also_non_np = 1, | 26764 | .also_non_np = 1, |
25881 | .np = 2, | 26765 | .np = 3, |
25882 | .tap = { 512 - 16, 16 }, | 26766 | .tap = { 512 - 20, 4, 16 }, |
25883 | }, | 26767 | }, |
25884 | }; | 26768 | }; |
25885 | 26769 | ||
@@ -26221,8 +27105,8 @@ static struct cipher_testvec camellia_xts_dec_tv_template[] = { | |||
26221 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | 27105 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", |
26222 | .rlen = 512, | 27106 | .rlen = 512, |
26223 | .also_non_np = 1, | 27107 | .also_non_np = 1, |
26224 | .np = 2, | 27108 | .np = 3, |
26225 | .tap = { 512 - 16, 16 }, | 27109 | .tap = { 512 - 20, 4, 16 }, |
26226 | }, | 27110 | }, |
26227 | }; | 27111 | }; |
26228 | 27112 | ||
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 02f177aeb16c..2fb0fdfc87df 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA | |||
391 | 391 | ||
392 | config CRYPTO_DEV_CCP | 392 | config CRYPTO_DEV_CCP |
393 | bool "Support for AMD Cryptographic Coprocessor" | 393 | bool "Support for AMD Cryptographic Coprocessor" |
394 | depends on X86 && PCI | 394 | depends on (X86 && PCI) || ARM64 |
395 | default n | 395 | default n |
396 | help | 396 | help |
397 | The AMD Cryptographic Coprocessor provides hardware support | 397 | The AMD Cryptographic Coprocessor provides hardware support |
@@ -418,4 +418,22 @@ config CRYPTO_DEV_MXS_DCP | |||
418 | To compile this driver as a module, choose M here: the module | 418 | To compile this driver as a module, choose M here: the module |
419 | will be called mxs-dcp. | 419 | will be called mxs-dcp. |
420 | 420 | ||
421 | source "drivers/crypto/qat/Kconfig" | ||
422 | |||
423 | config CRYPTO_DEV_QCE | ||
424 | tristate "Qualcomm crypto engine accelerator" | ||
425 | depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM | ||
426 | select CRYPTO_AES | ||
427 | select CRYPTO_DES | ||
428 | select CRYPTO_ECB | ||
429 | select CRYPTO_CBC | ||
430 | select CRYPTO_XTS | ||
431 | select CRYPTO_CTR | ||
432 | select CRYPTO_ALGAPI | ||
433 | select CRYPTO_BLKCIPHER | ||
434 | help | ||
435 | This driver supports Qualcomm crypto engine accelerator | ||
436 | hardware. To compile this driver as a module, choose M here. The | ||
437 | module will be called qcrypto. | ||
438 | |||
421 | endif # CRYPTO_HW | 439 | endif # CRYPTO_HW |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 482f090d16d0..3924f93d5774 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -23,3 +23,5 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o | |||
23 | obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o | 23 | obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o |
24 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o | 24 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o |
25 | obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ | 25 | obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ |
26 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ | ||
27 | obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ | ||
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 37f9cc98ba17..e4c6c58fbb03 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -1292,7 +1292,7 @@ static struct platform_driver crypto4xx_driver = { | |||
1292 | .of_match_table = crypto4xx_match, | 1292 | .of_match_table = crypto4xx_match, |
1293 | }, | 1293 | }, |
1294 | .probe = crypto4xx_probe, | 1294 | .probe = crypto4xx_probe, |
1295 | .remove = crypto4xx_remove, | 1295 | .remove = __exit_p(crypto4xx_remove), |
1296 | }; | 1296 | }; |
1297 | 1297 | ||
1298 | module_platform_driver(crypto4xx_driver); | 1298 | module_platform_driver(crypto4xx_driver); |
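Context for the __exit_p() change: it pairs with marking crypto4xx_remove as __exit (presumably done in the same patch, outside this hunk). For built-in drivers the __exit section is discarded at link time, so the .remove pointer must collapse to NULL rather than reference dropped code. The macro, essentially as defined in include/linux/init.h:

	#ifdef MODULE
	#define __exit_p(x)	x	/* modular build: keep the remove callback */
	#else
	#define __exit_p(x)	NULL	/* built-in: __exit code is discarded */
	#endif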
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 0618be06b9fb..9a4f69eaa5e0 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c | |||
@@ -1353,7 +1353,6 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd | |||
1353 | GFP_KERNEL); | 1353 | GFP_KERNEL); |
1354 | if (!pdata->dma_slave) { | 1354 | if (!pdata->dma_slave) { |
1355 | dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); | 1355 | dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); |
1356 | devm_kfree(&pdev->dev, pdata); | ||
1357 | return ERR_PTR(-ENOMEM); | 1356 | return ERR_PTR(-ENOMEM); |
1358 | } | 1357 | } |
1359 | 1358 | ||
@@ -1375,7 +1374,8 @@ static int atmel_sha_probe(struct platform_device *pdev) | |||
1375 | unsigned long sha_phys_size; | 1374 | unsigned long sha_phys_size; |
1376 | int err; | 1375 | int err; |
1377 | 1376 | ||
1378 | sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL); | 1377 | sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev), |
1378 | GFP_KERNEL); | ||
1379 | if (sha_dd == NULL) { | 1379 | if (sha_dd == NULL) { |
1380 | dev_err(dev, "unable to alloc data struct.\n"); | 1380 | dev_err(dev, "unable to alloc data struct.\n"); |
1381 | err = -ENOMEM; | 1381 | err = -ENOMEM; |
@@ -1490,8 +1490,6 @@ clk_err: | |||
1490 | free_irq(sha_dd->irq, sha_dd); | 1490 | free_irq(sha_dd->irq, sha_dd); |
1491 | res_err: | 1491 | res_err: |
1492 | tasklet_kill(&sha_dd->done_task); | 1492 | tasklet_kill(&sha_dd->done_task); |
1493 | kfree(sha_dd); | ||
1494 | sha_dd = NULL; | ||
1495 | sha_dd_err: | 1493 | sha_dd_err: |
1496 | dev_err(dev, "initialization failed.\n"); | 1494 | dev_err(dev, "initialization failed.\n"); |
1497 | 1495 | ||
@@ -1523,9 +1521,6 @@ static int atmel_sha_remove(struct platform_device *pdev) | |||
1523 | if (sha_dd->irq >= 0) | 1521 | if (sha_dd->irq >= 0) |
1524 | free_irq(sha_dd->irq, sha_dd); | 1522 | free_irq(sha_dd->irq, sha_dd); |
1525 | 1523 | ||
1526 | kfree(sha_dd); | ||
1527 | sha_dd = NULL; | ||
1528 | |||
1529 | return 0; | 1524 | return 0; |
1530 | } | 1525 | } |
1531 | 1526 | ||
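The atmel-sha hunks above (and the atmel-tdes ones below) are a straight devres conversion: devm_kzalloc()/devm_kmalloc() tie the allocation's lifetime to the device, which is why the kfree() calls in the error unwind and in remove() are deleted outright rather than relocated — keeping them would turn into double frees. The same reasoning removes the explicit devm_kfree(pdata) from the OF-init error path. The resulting shape, as a sketch with hypothetical foo_* names:

	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct foo_dev { int dummy; };	/* placeholder private state */

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_dev *dd;

		dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;	/* nothing to unwind; devres owns dd */

		platform_set_drvdata(pdev, dd);
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		/* no kfree(): the core releases dd after remove() returns */
		return 0;
	}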
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 6cde5b530c69..d3a9041938ea 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c | |||
@@ -1337,7 +1337,6 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p | |||
1337 | GFP_KERNEL); | 1337 | GFP_KERNEL); |
1338 | if (!pdata->dma_slave) { | 1338 | if (!pdata->dma_slave) { |
1339 | dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); | 1339 | dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); |
1340 | devm_kfree(&pdev->dev, pdata); | ||
1341 | return ERR_PTR(-ENOMEM); | 1340 | return ERR_PTR(-ENOMEM); |
1342 | } | 1341 | } |
1343 | 1342 | ||
@@ -1359,7 +1358,7 @@ static int atmel_tdes_probe(struct platform_device *pdev) | |||
1359 | unsigned long tdes_phys_size; | 1358 | unsigned long tdes_phys_size; |
1360 | int err; | 1359 | int err; |
1361 | 1360 | ||
1362 | tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL); | 1361 | tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL); |
1363 | if (tdes_dd == NULL) { | 1362 | if (tdes_dd == NULL) { |
1364 | dev_err(dev, "unable to alloc data struct.\n"); | 1363 | dev_err(dev, "unable to alloc data struct.\n"); |
1365 | err = -ENOMEM; | 1364 | err = -ENOMEM; |
@@ -1483,8 +1482,6 @@ tdes_irq_err: | |||
1483 | res_err: | 1482 | res_err: |
1484 | tasklet_kill(&tdes_dd->done_task); | 1483 | tasklet_kill(&tdes_dd->done_task); |
1485 | tasklet_kill(&tdes_dd->queue_task); | 1484 | tasklet_kill(&tdes_dd->queue_task); |
1486 | kfree(tdes_dd); | ||
1487 | tdes_dd = NULL; | ||
1488 | tdes_dd_err: | 1485 | tdes_dd_err: |
1489 | dev_err(dev, "initialization failed.\n"); | 1486 | dev_err(dev, "initialization failed.\n"); |
1490 | 1487 | ||
@@ -1519,9 +1516,6 @@ static int atmel_tdes_remove(struct platform_device *pdev) | |||
1519 | if (tdes_dd->irq >= 0) | 1516 | if (tdes_dd->irq >= 0) |
1520 | free_irq(tdes_dd->irq, tdes_dd); | 1517 | free_irq(tdes_dd->irq, tdes_dd); |
1521 | 1518 | ||
1522 | kfree(tdes_dd); | ||
1523 | tdes_dd = NULL; | ||
1524 | |||
1525 | return 0; | 1519 | return 0; |
1526 | } | 1520 | } |
1527 | 1521 | ||
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index c09ce1f040d3..a80ea853701d 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type) | |||
97 | { | 97 | { |
98 | u32 *jump_cmd, *uncond_jump_cmd; | 98 | u32 *jump_cmd, *uncond_jump_cmd; |
99 | 99 | ||
100 | /* DK bit is valid only for AES */ | ||
101 | if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { | ||
102 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | ||
103 | OP_ALG_DECRYPT); | ||
104 | return; | ||
105 | } | ||
106 | |||
100 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); | 107 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); |
101 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | 108 | append_operation(desc, type | OP_ALG_AS_INITFINAL | |
102 | OP_ALG_DECRYPT); | 109 | OP_ALG_DECRYPT); |
@@ -786,7 +793,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
786 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, | 793 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, |
787 | desc_bytes(desc), | 794 | desc_bytes(desc), |
788 | DMA_TO_DEVICE); | 795 | DMA_TO_DEVICE); |
789 | if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { | 796 | if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { |
790 | dev_err(jrdev, "unable to map shared descriptor\n"); | 797 | dev_err(jrdev, "unable to map shared descriptor\n"); |
791 | return -ENOMEM; | 798 | return -ENOMEM; |
792 | } | 799 | } |
@@ -1313,8 +1320,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1313 | DMA_FROM_DEVICE, dst_chained); | 1320 | DMA_FROM_DEVICE, dst_chained); |
1314 | } | 1321 | } |
1315 | 1322 | ||
1316 | /* Check if data are contiguous */ | ||
1317 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); | 1323 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); |
1324 | if (dma_mapping_error(jrdev, iv_dma)) { | ||
1325 | dev_err(jrdev, "unable to map IV\n"); | ||
1326 | return ERR_PTR(-ENOMEM); | ||
1327 | } | ||
1328 | |||
1329 | /* Check if data are contiguous */ | ||
1318 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != | 1330 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != |
1319 | iv_dma || src_nents || iv_dma + ivsize != | 1331 | iv_dma || src_nents || iv_dma + ivsize != |
1320 | sg_dma_address(req->src)) { | 1332 | sg_dma_address(req->src)) { |
@@ -1345,8 +1357,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1345 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1357 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1346 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + | 1358 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + |
1347 | desc_bytes; | 1359 | desc_bytes; |
1348 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1349 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1350 | *all_contig_ptr = all_contig; | 1360 | *all_contig_ptr = all_contig; |
1351 | 1361 | ||
1352 | sec4_sg_index = 0; | 1362 | sec4_sg_index = 0; |
@@ -1369,6 +1379,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1369 | sg_to_sec4_sg_last(req->dst, dst_nents, | 1379 | sg_to_sec4_sg_last(req->dst, dst_nents, |
1370 | edesc->sec4_sg + sec4_sg_index, 0); | 1380 | edesc->sec4_sg + sec4_sg_index, 0); |
1371 | } | 1381 | } |
1382 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1383 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1384 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
1385 | dev_err(jrdev, "unable to map S/G table\n"); | ||
1386 | return ERR_PTR(-ENOMEM); | ||
1387 | } | ||
1372 | 1388 | ||
1373 | return edesc; | 1389 | return edesc; |
1374 | } | 1390 | } |
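From here on, the caamalg and caamhash hunks apply one hardening pattern throughout: every dma_map_single() result is vetted with dma_mapping_error() before the bus address is used (the hunk just above also fixed a copy-paste slip where the decrypt descriptor's mapping was checked through sh_desc_enc_dma). The S/G table mapping additionally moves to after sg_to_sec4_sg_last() has populated the table, so a DMA_TO_DEVICE mapping is never created for memory the CPU is still about to write. Distilled into a helper-shaped sketch — not an in-tree function:

	#include <linux/dma-mapping.h>

	static int map_checked(struct device *jrdev, void *buf, size_t len,
			       dma_addr_t *handle)
	{
		*handle = dma_map_single(jrdev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, *handle)) {
			dev_err(jrdev, "unable to map buffer\n");
			return -ENOMEM;	/* callers propagate this upward */
		}
		return 0;
	}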
@@ -1494,8 +1510,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
1494 | DMA_FROM_DEVICE, dst_chained); | 1510 | DMA_FROM_DEVICE, dst_chained); |
1495 | } | 1511 | } |
1496 | 1512 | ||
1497 | /* Check if data are contiguous */ | ||
1498 | iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); | 1513 | iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); |
1514 | if (dma_mapping_error(jrdev, iv_dma)) { | ||
1515 | dev_err(jrdev, "unable to map IV\n"); | ||
1516 | return ERR_PTR(-ENOMEM); | ||
1517 | } | ||
1518 | |||
1519 | /* Check if data are contiguous */ | ||
1499 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != | 1520 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != |
1500 | iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) | 1521 | iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) |
1501 | contig &= ~GIV_SRC_CONTIG; | 1522 | contig &= ~GIV_SRC_CONTIG; |
@@ -1534,8 +1555,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
1534 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1555 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1535 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + | 1556 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + |
1536 | desc_bytes; | 1557 | desc_bytes; |
1537 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1538 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1539 | *contig_ptr = contig; | 1558 | *contig_ptr = contig; |
1540 | 1559 | ||
1541 | sec4_sg_index = 0; | 1560 | sec4_sg_index = 0; |
@@ -1559,6 +1578,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
1559 | sg_to_sec4_sg_last(req->dst, dst_nents, | 1578 | sg_to_sec4_sg_last(req->dst, dst_nents, |
1560 | edesc->sec4_sg + sec4_sg_index, 0); | 1579 | edesc->sec4_sg + sec4_sg_index, 0); |
1561 | } | 1580 | } |
1581 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1582 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1583 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
1584 | dev_err(jrdev, "unable to map S/G table\n"); | ||
1585 | return ERR_PTR(-ENOMEM); | ||
1586 | } | ||
1562 | 1587 | ||
1563 | return edesc; | 1588 | return edesc; |
1564 | } | 1589 | } |
@@ -1650,11 +1675,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
1650 | DMA_FROM_DEVICE, dst_chained); | 1675 | DMA_FROM_DEVICE, dst_chained); |
1651 | } | 1676 | } |
1652 | 1677 | ||
1678 | iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); | ||
1679 | if (dma_mapping_error(jrdev, iv_dma)) { | ||
1680 | dev_err(jrdev, "unable to map IV\n"); | ||
1681 | return ERR_PTR(-ENOMEM); | ||
1682 | } | ||
1683 | |||
1653 | /* | 1684 | /* |
1654 | * Check if iv can be contiguous with source and destination. | 1685 | * Check if iv can be contiguous with source and destination. |
1655 | * If so, include it. If not, create scatterlist. | 1686 | * If so, include it. If not, create scatterlist. |
1656 | */ | 1687 | */ |
1657 | iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); | ||
1658 | if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) | 1688 | if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) |
1659 | iv_contig = true; | 1689 | iv_contig = true; |
1660 | else | 1690 | else |
@@ -1693,6 +1723,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
1693 | 1723 | ||
1694 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1724 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
1695 | sec4_sg_bytes, DMA_TO_DEVICE); | 1725 | sec4_sg_bytes, DMA_TO_DEVICE); |
1726 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
1727 | dev_err(jrdev, "unable to map S/G table\n"); | ||
1728 | return ERR_PTR(-ENOMEM); | ||
1729 | } | ||
1730 | |||
1696 | edesc->iv_dma = iv_dma; | 1731 | edesc->iv_dma = iv_dma; |
1697 | 1732 | ||
1698 | #ifdef DEBUG | 1733 | #ifdef DEBUG |
@@ -2441,8 +2476,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template | |||
2441 | 2476 | ||
2442 | static int __init caam_algapi_init(void) | 2477 | static int __init caam_algapi_init(void) |
2443 | { | 2478 | { |
2479 | struct device_node *dev_node; | ||
2480 | struct platform_device *pdev; | ||
2481 | struct device *ctrldev; | ||
2482 | void *priv; | ||
2444 | int i = 0, err = 0; | 2483 | int i = 0, err = 0; |
2445 | 2484 | ||
2485 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
2486 | if (!dev_node) { | ||
2487 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
2488 | if (!dev_node) | ||
2489 | return -ENODEV; | ||
2490 | } | ||
2491 | |||
2492 | pdev = of_find_device_by_node(dev_node); | ||
2493 | if (!pdev) { | ||
2494 | of_node_put(dev_node); | ||
2495 | return -ENODEV; | ||
2496 | } | ||
2497 | |||
2498 | ctrldev = &pdev->dev; | ||
2499 | priv = dev_get_drvdata(ctrldev); | ||
2500 | of_node_put(dev_node); | ||
2501 | |||
2502 | /* | ||
2503 | * If priv is NULL, it's probably because the caam driver wasn't | ||
2504 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
2505 | */ | ||
2506 | if (!priv) | ||
2507 | return -ENODEV; | ||
2508 | |||
2509 | |||
2446 | INIT_LIST_HEAD(&alg_list); | 2510 | INIT_LIST_HEAD(&alg_list); |
2447 | 2511 | ||
2448 | /* register crypto algorithms the device supports */ | 2512 | /* register crypto algorithms the device supports */ |
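One detail worth noting in the caam_algapi_init() hunk above: both device-tree lookups take a reference on the node, and of_node_put() is issued on every exit path, including the of_find_device_by_node() failure branch, before the NULL-drvdata check turns an unprobed controller into -ENODEV. The refcount discipline in isolation, with a hypothetical "vendor,ip" compatible string:

	#include <linux/of.h>
	#include <linux/of_platform.h>

	static struct platform_device *find_ctrl(void)
	{
		struct device_node *np;
		struct platform_device *pdev;

		np = of_find_compatible_node(NULL, NULL, "vendor,ip");
		if (!np)
			return NULL;

		pdev = of_find_device_by_node(np);
		of_node_put(np);	/* node reference dropped on both paths */
		return pdev;		/* may be NULL: controller never probed */
	}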
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 0d9284ef96a8..b464d03ebf40 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -137,13 +137,20 @@ struct caam_hash_state { | |||
137 | /* Common job descriptor seq in/out ptr routines */ | 137 | /* Common job descriptor seq in/out ptr routines */ |
138 | 138 | ||
139 | /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ | 139 | /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ |
140 | static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, | 140 | static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, |
141 | struct caam_hash_state *state, | 141 | struct caam_hash_state *state, |
142 | int ctx_len) | 142 | int ctx_len) |
143 | { | 143 | { |
144 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, | 144 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, |
145 | ctx_len, DMA_FROM_DEVICE); | 145 | ctx_len, DMA_FROM_DEVICE); |
146 | if (dma_mapping_error(jrdev, state->ctx_dma)) { | ||
147 | dev_err(jrdev, "unable to map ctx\n"); | ||
148 | return -ENOMEM; | ||
149 | } | ||
150 | |||
146 | append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); | 151 | append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); |
152 | |||
153 | return 0; | ||
147 | } | 154 | } |
148 | 155 | ||
149 | /* Map req->result, and append seq_out_ptr command that points to it */ | 156 | /* Map req->result, and append seq_out_ptr command that points to it */ |
@@ -201,14 +208,19 @@ try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, | |||
201 | } | 208 | } |
202 | 209 | ||
203 | /* Map state->caam_ctx, and add it to link table */ | 210 | /* Map state->caam_ctx, and add it to link table */ |
204 | static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, | 211 | static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, |
205 | struct caam_hash_state *state, | 212 | struct caam_hash_state *state, int ctx_len, |
206 | int ctx_len, | 213 | struct sec4_sg_entry *sec4_sg, u32 flag) |
207 | struct sec4_sg_entry *sec4_sg, | ||
208 | u32 flag) | ||
209 | { | 214 | { |
210 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); | 215 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); |
216 | if (dma_mapping_error(jrdev, state->ctx_dma)) { | ||
217 | dev_err(jrdev, "unable to map ctx\n"); | ||
218 | return -ENOMEM; | ||
219 | } | ||
220 | |||
211 | dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); | 221 | dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); |
222 | |||
223 | return 0; | ||
212 | } | 224 | } |
213 | 225 | ||
214 | /* Common shared descriptor commands */ | 226 | /* Common shared descriptor commands */ |
@@ -487,11 +499,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | |||
487 | digestsize, 1); | 499 | digestsize, 1); |
488 | #endif | 500 | #endif |
489 | } | 501 | } |
490 | *keylen = digestsize; | ||
491 | |||
492 | dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); | 502 | dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); |
493 | dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); | 503 | dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); |
494 | 504 | ||
505 | *keylen = digestsize; | ||
506 | |||
495 | kfree(desc); | 507 | kfree(desc); |
496 | 508 | ||
497 | return ret; | 509 | return ret; |
@@ -706,7 +718,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
706 | if (err) | 718 | if (err) |
707 | caam_jr_strstatus(jrdev, err); | 719 | caam_jr_strstatus(jrdev, err); |
708 | 720 | ||
709 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | 721 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); |
710 | kfree(edesc); | 722 | kfree(edesc); |
711 | 723 | ||
712 | #ifdef DEBUG | 724 | #ifdef DEBUG |
@@ -741,7 +753,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
741 | if (err) | 753 | if (err) |
742 | caam_jr_strstatus(jrdev, err); | 754 | caam_jr_strstatus(jrdev, err); |
743 | 755 | ||
744 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); | 756 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); |
745 | kfree(edesc); | 757 | kfree(edesc); |
746 | 758 | ||
747 | #ifdef DEBUG | 759 | #ifdef DEBUG |
@@ -808,12 +820,11 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
808 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 820 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
809 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | 821 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + |
810 | DESC_JOB_IO_LEN; | 822 | DESC_JOB_IO_LEN; |
811 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
812 | sec4_sg_bytes, | ||
813 | DMA_TO_DEVICE); | ||
814 | 823 | ||
815 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | 824 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
816 | edesc->sec4_sg, DMA_BIDIRECTIONAL); | 825 | edesc->sec4_sg, DMA_BIDIRECTIONAL); |
826 | if (ret) | ||
827 | return ret; | ||
817 | 828 | ||
818 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, | 829 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, |
819 | edesc->sec4_sg + 1, | 830 | edesc->sec4_sg + 1, |
@@ -839,6 +850,14 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
839 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | 850 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | |
840 | HDR_REVERSE); | 851 | HDR_REVERSE); |
841 | 852 | ||
853 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
854 | sec4_sg_bytes, | ||
855 | DMA_TO_DEVICE); | ||
856 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
857 | dev_err(jrdev, "unable to map S/G table\n"); | ||
858 | return -ENOMEM; | ||
859 | } | ||
860 | |||
842 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + | 861 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + |
843 | to_hash, LDST_SGF); | 862 | to_hash, LDST_SGF); |
844 | 863 | ||
@@ -911,23 +930,34 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
911 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 930 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
912 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | 931 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + |
913 | DESC_JOB_IO_LEN; | 932 | DESC_JOB_IO_LEN; |
914 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
915 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
916 | edesc->src_nents = 0; | 933 | edesc->src_nents = 0; |
917 | 934 | ||
918 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, | 935 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
919 | DMA_TO_DEVICE); | 936 | edesc->sec4_sg, DMA_TO_DEVICE); |
937 | if (ret) | ||
938 | return ret; | ||
920 | 939 | ||
921 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, | 940 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, |
922 | buf, state->buf_dma, buflen, | 941 | buf, state->buf_dma, buflen, |
923 | last_buflen); | 942 | last_buflen); |
924 | (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; | 943 | (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; |
925 | 944 | ||
945 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
946 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
947 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
948 | dev_err(jrdev, "unable to map S/G table\n"); | ||
949 | return -ENOMEM; | ||
950 | } | ||
951 | |||
926 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, | 952 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, |
927 | LDST_SGF); | 953 | LDST_SGF); |
928 | 954 | ||
929 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 955 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
930 | digestsize); | 956 | digestsize); |
957 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | ||
958 | dev_err(jrdev, "unable to map dst\n"); | ||
959 | return -ENOMEM; | ||
960 | } | ||
931 | 961 | ||
932 | #ifdef DEBUG | 962 | #ifdef DEBUG |
933 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 963 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
@@ -989,11 +1019,11 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
989 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1019 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
990 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | 1020 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + |
991 | DESC_JOB_IO_LEN; | 1021 | DESC_JOB_IO_LEN; |
992 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
993 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
994 | 1022 | ||
995 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, | 1023 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
996 | DMA_TO_DEVICE); | 1024 | edesc->sec4_sg, DMA_TO_DEVICE); |
1025 | if (ret) | ||
1026 | return ret; | ||
997 | 1027 | ||
998 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, | 1028 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, |
999 | buf, state->buf_dma, buflen, | 1029 | buf, state->buf_dma, buflen, |
@@ -1002,11 +1032,22 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
1002 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + | 1032 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + |
1003 | sec4_sg_src_index, chained); | 1033 | sec4_sg_src_index, chained); |
1004 | 1034 | ||
1035 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1036 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1037 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
1038 | dev_err(jrdev, "unable to map S/G table\n"); | ||
1039 | return -ENOMEM; | ||
1040 | } | ||
1041 | |||
1005 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + | 1042 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + |
1006 | buflen + req->nbytes, LDST_SGF); | 1043 | buflen + req->nbytes, LDST_SGF); |
1007 | 1044 | ||
1008 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1045 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
1009 | digestsize); | 1046 | digestsize); |
1047 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | ||
1048 | dev_err(jrdev, "unable to map dst\n"); | ||
1049 | return -ENOMEM; | ||
1050 | } | ||
1010 | 1051 | ||
1011 | #ifdef DEBUG | 1052 | #ifdef DEBUG |
1012 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1053 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
@@ -1056,8 +1097,7 @@ static int ahash_digest(struct ahash_request *req) | |||
1056 | } | 1097 | } |
1057 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | 1098 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + |
1058 | DESC_JOB_IO_LEN; | 1099 | DESC_JOB_IO_LEN; |
1059 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1100 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1060 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1061 | edesc->src_nents = src_nents; | 1101 | edesc->src_nents = src_nents; |
1062 | edesc->chained = chained; | 1102 | edesc->chained = chained; |
1063 | 1103 | ||
@@ -1067,6 +1107,12 @@ static int ahash_digest(struct ahash_request *req) | |||
1067 | 1107 | ||
1068 | if (src_nents) { | 1108 | if (src_nents) { |
1069 | sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); | 1109 | sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); |
1110 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1111 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1112 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
1113 | dev_err(jrdev, "unable to map S/G table\n"); | ||
1114 | return -ENOMEM; | ||
1115 | } | ||
1070 | src_dma = edesc->sec4_sg_dma; | 1116 | src_dma = edesc->sec4_sg_dma; |
1071 | options = LDST_SGF; | 1117 | options = LDST_SGF; |
1072 | } else { | 1118 | } else { |
@@ -1077,6 +1123,10 @@ static int ahash_digest(struct ahash_request *req) | |||
1077 | 1123 | ||
1078 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1124 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
1079 | digestsize); | 1125 | digestsize); |
1126 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | ||
1127 | dev_err(jrdev, "unable to map dst\n"); | ||
1128 | return -ENOMEM; | ||
1129 | } | ||
1080 | 1130 | ||
1081 | #ifdef DEBUG | 1131 | #ifdef DEBUG |
1082 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1132 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
@@ -1125,11 +1175,19 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
1125 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | 1175 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); |
1126 | 1176 | ||
1127 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | 1177 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); |
1178 | if (dma_mapping_error(jrdev, state->buf_dma)) { | ||
1179 | dev_err(jrdev, "unable to map src\n"); | ||
1180 | return -ENOMEM; | ||
1181 | } | ||
1128 | 1182 | ||
1129 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); | 1183 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); |
1130 | 1184 | ||
1131 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1185 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
1132 | digestsize); | 1186 | digestsize); |
1187 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | ||
1188 | dev_err(jrdev, "unable to map dst\n"); | ||
1189 | return -ENOMEM; | ||
1190 | } | ||
1133 | edesc->src_nents = 0; | 1191 | edesc->src_nents = 0; |
1134 | 1192 | ||
1135 | #ifdef DEBUG | 1193 | #ifdef DEBUG |
@@ -1197,9 +1255,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1197 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1255 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1198 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | 1256 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + |
1199 | DESC_JOB_IO_LEN; | 1257 | DESC_JOB_IO_LEN; |
1200 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1258 | edesc->dst_dma = 0; |
1201 | sec4_sg_bytes, | ||
1202 | DMA_TO_DEVICE); | ||
1203 | 1259 | ||
1204 | state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, | 1260 | state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, |
1205 | buf, *buflen); | 1261 | buf, *buflen); |
@@ -1216,9 +1272,19 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1216 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | 1272 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | |
1217 | HDR_REVERSE); | 1273 | HDR_REVERSE); |
1218 | 1274 | ||
1275 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1276 | sec4_sg_bytes, | ||
1277 | DMA_TO_DEVICE); | ||
1278 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
1279 | dev_err(jrdev, "unable to map S/G table\n"); | ||
1280 | return -ENOMEM; | ||
1281 | } | ||
1282 | |||
1219 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); | 1283 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); |
1220 | 1284 | ||
1221 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | 1285 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); |
1286 | if (ret) | ||
1287 | return ret; | ||
1222 | 1288 | ||
1223 | #ifdef DEBUG | 1289 | #ifdef DEBUG |
1224 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1290 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
@@ -1297,8 +1363,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
1297 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1363 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1298 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | 1364 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + |
1299 | DESC_JOB_IO_LEN; | 1365 | DESC_JOB_IO_LEN; |
1300 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1301 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1302 | 1366 | ||
1303 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, | 1367 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, |
1304 | state->buf_dma, buflen, | 1368 | state->buf_dma, buflen, |
@@ -1307,11 +1371,22 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
1307 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, | 1371 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, |
1308 | chained); | 1372 | chained); |
1309 | 1373 | ||
1374 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
1375 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
1376 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
1377 | dev_err(jrdev, "unable to map S/G table\n"); | ||
1378 | return -ENOMEM; | ||
1379 | } | ||
1380 | |||
1310 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + | 1381 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + |
1311 | req->nbytes, LDST_SGF); | 1382 | req->nbytes, LDST_SGF); |
1312 | 1383 | ||
1313 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1384 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
1314 | digestsize); | 1385 | digestsize); |
1386 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | ||
1387 | dev_err(jrdev, "unable to map dst\n"); | ||
1388 | return -ENOMEM; | ||
1389 | } | ||
1315 | 1390 | ||
1316 | #ifdef DEBUG | 1391 | #ifdef DEBUG |
1317 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1392 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
@@ -1380,13 +1455,19 @@ static int ahash_update_first(struct ahash_request *req) | |||
1380 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1455 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
1381 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | 1456 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + |
1382 | DESC_JOB_IO_LEN; | 1457 | DESC_JOB_IO_LEN; |
1383 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1458 | edesc->dst_dma = 0; |
1384 | sec4_sg_bytes, | ||
1385 | DMA_TO_DEVICE); | ||
1386 | 1459 | ||
1387 | if (src_nents) { | 1460 | if (src_nents) { |
1388 | sg_to_sec4_sg_last(req->src, src_nents, | 1461 | sg_to_sec4_sg_last(req->src, src_nents, |
1389 | edesc->sec4_sg, 0); | 1462 | edesc->sec4_sg, 0); |
1463 | edesc->sec4_sg_dma = dma_map_single(jrdev, | ||
1464 | edesc->sec4_sg, | ||
1465 | sec4_sg_bytes, | ||
1466 | DMA_TO_DEVICE); | ||
1467 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
1468 | dev_err(jrdev, "unable to map S/G table\n"); | ||
1469 | return -ENOMEM; | ||
1470 | } | ||
1390 | src_dma = edesc->sec4_sg_dma; | 1471 | src_dma = edesc->sec4_sg_dma; |
1391 | options = LDST_SGF; | 1472 | options = LDST_SGF; |
1392 | } else { | 1473 | } else { |
@@ -1404,7 +1485,9 @@ static int ahash_update_first(struct ahash_request *req) | |||
1404 | 1485 | ||
1405 | append_seq_in_ptr(desc, src_dma, to_hash, options); | 1486 | append_seq_in_ptr(desc, src_dma, to_hash, options); |
1406 | 1487 | ||
1407 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | 1488 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); |
1489 | if (ret) | ||
1490 | return ret; | ||
1408 | 1491 | ||
1409 | #ifdef DEBUG | 1492 | #ifdef DEBUG |
1410 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1493 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
@@ -1453,6 +1536,7 @@ static int ahash_init(struct ahash_request *req) | |||
1453 | state->final = ahash_final_no_ctx; | 1536 | state->final = ahash_final_no_ctx; |
1454 | 1537 | ||
1455 | state->current_buf = 0; | 1538 | state->current_buf = 0; |
1539 | state->buf_dma = 0; | ||
1456 | 1540 | ||
1457 | return 0; | 1541 | return 0; |
1458 | } | 1542 | } |
@@ -1787,8 +1871,36 @@ caam_hash_alloc(struct caam_hash_template *template, | |||
1787 | 1871 | ||
1788 | static int __init caam_algapi_hash_init(void) | 1872 | static int __init caam_algapi_hash_init(void) |
1789 | { | 1873 | { |
1874 | struct device_node *dev_node; | ||
1875 | struct platform_device *pdev; | ||
1876 | struct device *ctrldev; | ||
1877 | void *priv; | ||
1790 | int i = 0, err = 0; | 1878 | int i = 0, err = 0; |
1791 | 1879 | ||
1880 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
1881 | if (!dev_node) { | ||
1882 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
1883 | if (!dev_node) | ||
1884 | return -ENODEV; | ||
1885 | } | ||
1886 | |||
1887 | pdev = of_find_device_by_node(dev_node); | ||
1888 | if (!pdev) { | ||
1889 | of_node_put(dev_node); | ||
1890 | return -ENODEV; | ||
1891 | } | ||
1892 | |||
1893 | ctrldev = &pdev->dev; | ||
1894 | priv = dev_get_drvdata(ctrldev); | ||
1895 | of_node_put(dev_node); | ||
1896 | |||
1897 | /* | ||
1898 | * If priv is NULL, it's probably because the caam driver wasn't | ||
1899 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
1900 | */ | ||
1901 | if (!priv) | ||
1902 | return -ENODEV; | ||
1903 | |||
1792 | INIT_LIST_HEAD(&hash_list); | 1904 | INIT_LIST_HEAD(&hash_list); |
1793 | 1905 | ||
1794 | /* register crypto algorithms the device supports */ | 1906 | /* register crypto algorithms the device supports */ |
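
Editor's note: the caamhash.c hunks above all make the same two-part change — each dma_map_single() call is moved to after the CPU has finished writing the S/G table, and every returned handle is checked with dma_mapping_error(). Mapping before the table is fully written risks the device seeing stale data on non-coherent platforms, and on systems with SWIOTLB or an IOMMU the mapping itself can fail. A minimal sketch of the pattern, assuming a generic device pointer and a CPU-built table; fill_table() and the names below are illustrative, not taken from the driver:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

static void fill_table(void *tbl, size_t len)
{
	/* stands in for sg_to_sec4_sg_last() and friends: all CPU
	 * writes to the table happen here, before the mapping below */
	memset(tbl, 0, len);
}

static int map_table(struct device *dev, void *tbl, size_t len,
		     dma_addr_t *handle)
{
	fill_table(tbl, len);			/* CPU writes done first */

	*handle = dma_map_single(dev, tbl, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle)) {	/* never assume success */
		dev_err(dev, "unable to map S/G table\n");
		return -ENOMEM;
	}
	return 0;
}
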
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 8c07d3153f12..ae31e555793c 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c | |||
@@ -185,7 +185,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait) | |||
185 | max - copied_idx, false); | 185 | max - copied_idx, false); |
186 | } | 186 | } |
187 | 187 | ||
188 | static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx) | 188 | static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx) |
189 | { | 189 | { |
190 | struct device *jrdev = ctx->jrdev; | 190 | struct device *jrdev = ctx->jrdev; |
191 | u32 *desc = ctx->sh_desc; | 191 | u32 *desc = ctx->sh_desc; |
@@ -203,13 +203,18 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx) | |||
203 | 203 | ||
204 | ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | 204 | ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), |
205 | DMA_TO_DEVICE); | 205 | DMA_TO_DEVICE); |
206 | if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) { | ||
207 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
208 | return -ENOMEM; | ||
209 | } | ||
206 | #ifdef DEBUG | 210 | #ifdef DEBUG |
207 | print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | 211 | print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
208 | desc, desc_bytes(desc), 1); | 212 | desc, desc_bytes(desc), 1); |
209 | #endif | 213 | #endif |
214 | return 0; | ||
210 | } | 215 | } |
211 | 216 | ||
212 | static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) | 217 | static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) |
213 | { | 218 | { |
214 | struct device *jrdev = ctx->jrdev; | 219 | struct device *jrdev = ctx->jrdev; |
215 | struct buf_data *bd = &ctx->bufs[buf_id]; | 220 | struct buf_data *bd = &ctx->bufs[buf_id]; |
@@ -220,12 +225,17 @@ static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) | |||
220 | HDR_REVERSE); | 225 | HDR_REVERSE); |
221 | 226 | ||
222 | bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE); | 227 | bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE); |
228 | if (dma_mapping_error(jrdev, bd->addr)) { | ||
229 | dev_err(jrdev, "unable to map dst\n"); | ||
230 | return -ENOMEM; | ||
231 | } | ||
223 | 232 | ||
224 | append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); | 233 | append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); |
225 | #ifdef DEBUG | 234 | #ifdef DEBUG |
226 | print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | 235 | print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
227 | desc, desc_bytes(desc), 1); | 236 | desc, desc_bytes(desc), 1); |
228 | #endif | 237 | #endif |
238 | return 0; | ||
229 | } | 239 | } |
230 | 240 | ||
231 | static void caam_cleanup(struct hwrng *rng) | 241 | static void caam_cleanup(struct hwrng *rng) |
@@ -242,24 +252,44 @@ static void caam_cleanup(struct hwrng *rng) | |||
242 | rng_unmap_ctx(rng_ctx); | 252 | rng_unmap_ctx(rng_ctx); |
243 | } | 253 | } |
244 | 254 | ||
245 | static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id) | 255 | static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id) |
246 | { | 256 | { |
247 | struct buf_data *bd = &ctx->bufs[buf_id]; | 257 | struct buf_data *bd = &ctx->bufs[buf_id]; |
258 | int err; | ||
259 | |||
260 | err = rng_create_job_desc(ctx, buf_id); | ||
261 | if (err) | ||
262 | return err; | ||
248 | 263 | ||
249 | rng_create_job_desc(ctx, buf_id); | ||
250 | atomic_set(&bd->empty, BUF_EMPTY); | 264 | atomic_set(&bd->empty, BUF_EMPTY); |
251 | submit_job(ctx, buf_id == ctx->current_buf); | 265 | submit_job(ctx, buf_id == ctx->current_buf); |
252 | wait_for_completion(&bd->filled); | 266 | wait_for_completion(&bd->filled); |
267 | |||
268 | return 0; | ||
253 | } | 269 | } |
254 | 270 | ||
255 | static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) | 271 | static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) |
256 | { | 272 | { |
273 | int err; | ||
274 | |||
257 | ctx->jrdev = jrdev; | 275 | ctx->jrdev = jrdev; |
258 | rng_create_sh_desc(ctx); | 276 | |
277 | err = rng_create_sh_desc(ctx); | ||
278 | if (err) | ||
279 | return err; | ||
280 | |||
259 | ctx->current_buf = 0; | 281 | ctx->current_buf = 0; |
260 | ctx->cur_buf_idx = 0; | 282 | ctx->cur_buf_idx = 0; |
261 | caam_init_buf(ctx, 0); | 283 | |
262 | caam_init_buf(ctx, 1); | 284 | err = caam_init_buf(ctx, 0); |
285 | if (err) | ||
286 | return err; | ||
287 | |||
288 | err = caam_init_buf(ctx, 1); | ||
289 | if (err) | ||
290 | return err; | ||
291 | |||
292 | return 0; | ||
263 | } | 293 | } |
264 | 294 | ||
265 | static struct hwrng caam_rng = { | 295 | static struct hwrng caam_rng = { |
@@ -278,6 +308,35 @@ static void __exit caam_rng_exit(void) | |||
278 | static int __init caam_rng_init(void) | 308 | static int __init caam_rng_init(void) |
279 | { | 309 | { |
280 | struct device *dev; | 310 | struct device *dev; |
311 | struct device_node *dev_node; | ||
312 | struct platform_device *pdev; | ||
313 | struct device *ctrldev; | ||
314 | void *priv; | ||
315 | int err; | ||
316 | |||
317 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
318 | if (!dev_node) { | ||
319 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
320 | if (!dev_node) | ||
321 | return -ENODEV; | ||
322 | } | ||
323 | |||
324 | pdev = of_find_device_by_node(dev_node); | ||
325 | if (!pdev) { | ||
326 | of_node_put(dev_node); | ||
327 | return -ENODEV; | ||
328 | } | ||
329 | |||
330 | ctrldev = &pdev->dev; | ||
331 | priv = dev_get_drvdata(ctrldev); | ||
332 | of_node_put(dev_node); | ||
333 | |||
334 | /* | ||
335 | * If priv is NULL, it's probably because the caam driver wasn't | ||
336 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
337 | */ | ||
338 | if (!priv) | ||
339 | return -ENODEV; | ||
281 | 340 | ||
282 | dev = caam_jr_alloc(); | 341 | dev = caam_jr_alloc(); |
283 | if (IS_ERR(dev)) { | 342 | if (IS_ERR(dev)) { |
@@ -287,7 +346,9 @@ static int __init caam_rng_init(void) | |||
287 | rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA); | 346 | rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA); |
288 | if (!rng_ctx) | 347 | if (!rng_ctx) |
289 | return -ENOMEM; | 348 | return -ENOMEM; |
290 | caam_init_rng(rng_ctx, dev); | 349 | err = caam_init_rng(rng_ctx, dev); |
350 | if (err) | ||
351 | return err; | ||
291 | 352 | ||
292 | dev_info(dev, "registering rng-caam\n"); | 353 | dev_info(dev, "registering rng-caam\n"); |
293 | return hwrng_register(&caam_rng); | 354 | return hwrng_register(&caam_rng); |
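
Editor's note: caam_rng_init() above and caam_algapi_hash_init() earlier now open with the same controller-presence probe — locate the SEC4 node, resolve its platform device, and bail out unless the controller driver set its drvdata — so neither module registers against a controller whose probe failed (e.g. after an RNG4 init failure). A condensed sketch of that check, under the assumption that a NULL drvdata always means the ctrl driver did not bind; the helper name is hypothetical:

#include <linux/of.h>
#include <linux/of_platform.h>

static int caam_ctrl_ready(void)
{
	struct device_node *np;
	struct platform_device *pdev;
	void *priv;

	np = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
	if (!np)
		return -ENODEV;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return -ENODEV;

	/* NULL drvdata => ctrl probe never completed; refuse to register */
	priv = dev_get_drvdata(&pdev->dev);
	return priv ? 0 : -ENODEV;
}
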
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 1c38f86bf63a..3cade79ea41e 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/device.h> | ||
8 | #include <linux/of_address.h> | 9 | #include <linux/of_address.h> |
9 | #include <linux/of_irq.h> | 10 | #include <linux/of_irq.h> |
10 | 11 | ||
@@ -87,6 +88,17 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, | |||
87 | 88 | ||
88 | /* Set the bit to request direct access to DECO0 */ | 89 | /* Set the bit to request direct access to DECO0 */ |
89 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | 90 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; |
91 | |||
92 | if (ctrlpriv->virt_en == 1) { | ||
93 | setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0); | ||
94 | |||
95 | while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) && | ||
96 | --timeout) | ||
97 | cpu_relax(); | ||
98 | |||
99 | timeout = 100000; | ||
100 | } | ||
101 | |||
90 | setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); | 102 | setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); |
91 | 103 | ||
92 | while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) && | 104 | while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) && |
@@ -129,6 +141,9 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, | |||
129 | *status = rd_reg32(&topregs->deco.op_status_hi) & | 141 | *status = rd_reg32(&topregs->deco.op_status_hi) & |
130 | DECO_OP_STATUS_HI_ERR_MASK; | 142 | DECO_OP_STATUS_HI_ERR_MASK; |
131 | 143 | ||
144 | if (ctrlpriv->virt_en == 1) | ||
145 | clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0); | ||
146 | |||
132 | /* Mark the DECO as free */ | 147 | /* Mark the DECO as free */ |
133 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); | 148 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); |
134 | 149 | ||
@@ -295,9 +310,6 @@ static int caam_remove(struct platform_device *pdev) | |||
295 | /* Unmap controller region */ | 310 | /* Unmap controller region */ |
296 | iounmap(&topregs->ctrl); | 311 | iounmap(&topregs->ctrl); |
297 | 312 | ||
298 | kfree(ctrlpriv->jrpdev); | ||
299 | kfree(ctrlpriv); | ||
300 | |||
301 | return ret; | 313 | return ret; |
302 | } | 314 | } |
303 | 315 | ||
@@ -380,9 +392,11 @@ static int caam_probe(struct platform_device *pdev) | |||
380 | #ifdef CONFIG_DEBUG_FS | 392 | #ifdef CONFIG_DEBUG_FS |
381 | struct caam_perfmon *perfmon; | 393 | struct caam_perfmon *perfmon; |
382 | #endif | 394 | #endif |
383 | u64 cha_vid; | 395 | u32 scfgr, comp_params; |
396 | u32 cha_vid_ls; | ||
384 | 397 | ||
385 | ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL); | 398 | ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private), |
399 | GFP_KERNEL); | ||
386 | if (!ctrlpriv) | 400 | if (!ctrlpriv) |
387 | return -ENOMEM; | 401 | return -ENOMEM; |
388 | 402 | ||
@@ -413,13 +427,40 @@ static int caam_probe(struct platform_device *pdev) | |||
413 | setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | | 427 | setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | |
414 | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); | 428 | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); |
415 | 429 | ||
430 | /* | ||
431 | * Read the Compile Time parameters and SCFGR to determine | ||
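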
432 | * if Virtualization is enabled for this platform | ||
433 | */ | ||
434 | comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms); | ||
435 | scfgr = rd_reg32(&topregs->ctrl.scfgr); | ||
436 | |||
437 | ctrlpriv->virt_en = 0; | ||
438 | if (comp_params & CTPR_MS_VIRT_EN_INCL) { | ||
439 | /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or | ||
440 | * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1 | ||
441 | */ | ||
442 | if ((comp_params & CTPR_MS_VIRT_EN_POR) || | ||
443 | (!(comp_params & CTPR_MS_VIRT_EN_POR) && | ||
444 | (scfgr & SCFGR_VIRT_EN))) | ||
445 | ctrlpriv->virt_en = 1; | ||
446 | } else { | ||
447 | /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */ | ||
448 | if (comp_params & CTPR_MS_VIRT_EN_POR) | ||
449 | ctrlpriv->virt_en = 1; | ||
450 | } | ||
451 | |||
452 | if (ctrlpriv->virt_en == 1) | ||
453 | setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START | | ||
454 | JRSTART_JR1_START | JRSTART_JR2_START | | ||
455 | JRSTART_JR3_START); | ||
456 | |||
416 | if (sizeof(dma_addr_t) == sizeof(u64)) | 457 | if (sizeof(dma_addr_t) == sizeof(u64)) |
417 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) | 458 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) |
418 | dma_set_mask(dev, DMA_BIT_MASK(40)); | 459 | dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); |
419 | else | 460 | else |
420 | dma_set_mask(dev, DMA_BIT_MASK(36)); | 461 | dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); |
421 | else | 462 | else |
422 | dma_set_mask(dev, DMA_BIT_MASK(32)); | 463 | dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
423 | 464 | ||
424 | /* | 465 | /* |
425 | * Detect and enable JobRs | 466 | * Detect and enable JobRs |
@@ -432,8 +473,9 @@ static int caam_probe(struct platform_device *pdev) | |||
432 | of_device_is_compatible(np, "fsl,sec4.0-job-ring")) | 473 | of_device_is_compatible(np, "fsl,sec4.0-job-ring")) |
433 | rspec++; | 474 | rspec++; |
434 | 475 | ||
435 | ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec, | 476 | ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev, |
436 | GFP_KERNEL); | 477 | sizeof(struct platform_device *) * rspec, |
478 | GFP_KERNEL); | ||
437 | if (ctrlpriv->jrpdev == NULL) { | 479 | if (ctrlpriv->jrpdev == NULL) { |
438 | iounmap(&topregs->ctrl); | 480 | iounmap(&topregs->ctrl); |
439 | return -ENOMEM; | 481 | return -ENOMEM; |
@@ -456,8 +498,9 @@ static int caam_probe(struct platform_device *pdev) | |||
456 | } | 498 | } |
457 | 499 | ||
458 | /* Check to see if QI present. If so, enable */ | 500 | /* Check to see if QI present. If so, enable */ |
459 | ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) & | 501 | ctrlpriv->qi_present = |
460 | CTPR_QI_MASK); | 502 | !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) & |
503 | CTPR_MS_QI_MASK); | ||
461 | if (ctrlpriv->qi_present) { | 504 | if (ctrlpriv->qi_present) { |
462 | ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi; | 505 | ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi; |
463 | /* This is all that's required to physically enable QI */ | 506 | /* This is all that's required to physically enable QI */ |
@@ -471,13 +514,13 @@ static int caam_probe(struct platform_device *pdev) | |||
471 | return -ENOMEM; | 514 | return -ENOMEM; |
472 | } | 515 | } |
473 | 516 | ||
474 | cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id); | 517 | cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls); |
475 | 518 | ||
476 | /* | 519 | /* |
477 | * If SEC has RNG version >= 4 and RNG state handle has not been | 520 | * If SEC has RNG version >= 4 and RNG state handle has not been |
478 | * already instantiated, do RNG instantiation | 521 | * already instantiated, do RNG instantiation |
479 | */ | 522 | */ |
480 | if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) { | 523 | if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { |
481 | ctrlpriv->rng4_sh_init = | 524 | ctrlpriv->rng4_sh_init = |
482 | rd_reg32(&topregs->ctrl.r4tst[0].rdsta); | 525 | rd_reg32(&topregs->ctrl.r4tst[0].rdsta); |
483 | /* | 526 | /* |
@@ -531,7 +574,8 @@ static int caam_probe(struct platform_device *pdev) | |||
531 | 574 | ||
532 | /* NOTE: RTIC detection ought to go here, around Si time */ | 575 | /* NOTE: RTIC detection ought to go here, around Si time */ |
533 | 576 | ||
534 | caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id); | 577 | caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 | |
578 | (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls); | ||
535 | 579 | ||
536 | /* Report "alive" for developer to see */ | 580 | /* Report "alive" for developer to see */ |
537 | dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, | 581 | dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, |
@@ -547,7 +591,7 @@ static int caam_probe(struct platform_device *pdev) | |||
547 | */ | 591 | */ |
548 | perfmon = (struct caam_perfmon __force *)&ctrl->perfmon; | 592 | perfmon = (struct caam_perfmon __force *)&ctrl->perfmon; |
549 | 593 | ||
550 | ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL); | 594 | ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL); |
551 | ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); | 595 | ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); |
552 | 596 | ||
553 | /* Controller-level - performance monitor counters */ | 597 | /* Controller-level - performance monitor counters */ |
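
Editor's note: the virtualization detection added to ctrl.c reduces to a small predicate — when the compile-time CTPR_MS_VIRT_EN_INCL bit is present, virtualization is enabled if either the power-on default (CTPR_MS_VIRT_EN_POR) or the SCFGR override says so; when INCL is absent, POR alone decides. A condensed, logically equivalent form of the nested if/else above (a sketch, using the same bit names):

static int caam_virt_enabled(u32 comp_params, u32 scfgr)
{
	if (comp_params & CTPR_MS_VIRT_EN_INCL)
		return !!((comp_params & CTPR_MS_VIRT_EN_POR) ||
			  (scfgr & SCFGR_VIRT_EN));

	return !!(comp_params & CTPR_MS_VIRT_EN_POR);
}
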
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 7e4500f18df6..d397ff9d56fd 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
@@ -321,7 +321,6 @@ struct sec4_sg_entry { | |||
321 | /* Continue - Not the last FIFO store to come */ | 321 | /* Continue - Not the last FIFO store to come */ |
322 | #define FIFOST_CONT_SHIFT 23 | 322 | #define FIFOST_CONT_SHIFT 23 |
323 | #define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT) | 323 | #define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT) |
324 | #define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT) | ||
325 | 324 | ||
326 | /* | 325 | /* |
327 | * Extended Length - use 32-bit extended length that | 326 | * Extended Length - use 32-bit extended length that |
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 6d85fcc5bd0a..97363db4e56e 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
@@ -82,6 +82,7 @@ struct caam_drv_private { | |||
82 | u8 total_jobrs; /* Total Job Rings in device */ | 82 | u8 total_jobrs; /* Total Job Rings in device */ |
83 | u8 qi_present; /* Nonzero if QI present in device */ | 83 | u8 qi_present; /* Nonzero if QI present in device */ |
84 | int secvio_irq; /* Security violation interrupt number */ | 84 | int secvio_irq; /* Security violation interrupt number */ |
85 | int virt_en; /* Virtualization enabled in CAAM */ | ||
85 | 86 | ||
86 | #define RNG4_MAX_HANDLES 2 | 87 | #define RNG4_MAX_HANDLES 2 |
87 | /* RNG4 block */ | 88 | /* RNG4 block */ |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index b512a4ba7569..4d18e27ffa9e 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -476,11 +476,11 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
476 | 476 | ||
477 | if (sizeof(dma_addr_t) == sizeof(u64)) | 477 | if (sizeof(dma_addr_t) == sizeof(u64)) |
478 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) | 478 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) |
479 | dma_set_mask(jrdev, DMA_BIT_MASK(40)); | 479 | dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40)); |
480 | else | 480 | else |
481 | dma_set_mask(jrdev, DMA_BIT_MASK(36)); | 481 | dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36)); |
482 | else | 482 | else |
483 | dma_set_mask(jrdev, DMA_BIT_MASK(32)); | 483 | dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32)); |
484 | 484 | ||
485 | /* Identify the interrupt */ | 485 | /* Identify the interrupt */ |
486 | jrpriv->irq = irq_of_parse_and_map(nprop, 0); | 486 | jrpriv->irq = irq_of_parse_and_map(nprop, 0); |
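
Editor's note: both ctrl.c and jr.c switch from dma_set_mask() to dma_set_mask_and_coherent(), which sets the streaming and coherent DMA masks in a single call; with plain dma_set_mask() the coherent mask stays at its 32-bit default, so coherent allocations can end up needlessly restricted relative to streaming mappings. A hedged usage sketch with the conventional 32-bit fallback (the wrapper is illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int set_dma_masks(struct device *dev, unsigned int wide_bits)
{
	int ret;

	/* try the device's full addressing capability first */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(wide_bits));
	if (ret)	/* fall back to the always-safe 32-bit mask */
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}
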
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index cbde8b95a6f8..f48e344ffc39 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -84,6 +84,7 @@ | |||
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | #ifndef CONFIG_64BIT | 86 | #ifndef CONFIG_64BIT |
87 | #ifdef __BIG_ENDIAN | ||
87 | static inline void wr_reg64(u64 __iomem *reg, u64 data) | 88 | static inline void wr_reg64(u64 __iomem *reg, u64 data) |
88 | { | 89 | { |
89 | wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); | 90 | wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); |
@@ -95,6 +96,21 @@ static inline u64 rd_reg64(u64 __iomem *reg) | |||
95 | return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | | 96 | return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | |
96 | ((u64)rd_reg32((u32 __iomem *)reg + 1)); | 97 | ((u64)rd_reg32((u32 __iomem *)reg + 1)); |
97 | } | 98 | } |
99 | #else | ||
100 | #ifdef __LITTLE_ENDIAN | ||
101 | static inline void wr_reg64(u64 __iomem *reg, u64 data) | ||
102 | { | ||
103 | wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32); | ||
104 | wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull); | ||
105 | } | ||
106 | |||
107 | static inline u64 rd_reg64(u64 __iomem *reg) | ||
108 | { | ||
109 | return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) | | ||
110 | ((u64)rd_reg32((u32 __iomem *)reg)); | ||
111 | } | ||
112 | #endif | ||
113 | #endif | ||
98 | #endif | 114 | #endif |
99 | 115 | ||
100 | /* | 116 | /* |
@@ -114,45 +130,45 @@ struct jr_outentry { | |||
114 | */ | 130 | */ |
115 | 131 | ||
116 | /* Number of DECOs */ | 132 | /* Number of DECOs */ |
117 | #define CHA_NUM_DECONUM_SHIFT 56 | 133 | #define CHA_NUM_MS_DECONUM_SHIFT 24 |
118 | #define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) | 134 | #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) |
119 | 135 | ||
120 | /* CHA Version IDs */ | 136 | /* CHA Version IDs */ |
121 | #define CHA_ID_AES_SHIFT 0 | 137 | #define CHA_ID_LS_AES_SHIFT 0 |
122 | #define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT) | 138 | #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) |
123 | 139 | ||
124 | #define CHA_ID_DES_SHIFT 4 | 140 | #define CHA_ID_LS_DES_SHIFT 4 |
125 | #define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT) | 141 | #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) |
126 | 142 | ||
127 | #define CHA_ID_ARC4_SHIFT 8 | 143 | #define CHA_ID_LS_ARC4_SHIFT 8 |
128 | #define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT) | 144 | #define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT) |
129 | 145 | ||
130 | #define CHA_ID_MD_SHIFT 12 | 146 | #define CHA_ID_LS_MD_SHIFT 12 |
131 | #define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT) | 147 | #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) |
132 | 148 | ||
133 | #define CHA_ID_RNG_SHIFT 16 | 149 | #define CHA_ID_LS_RNG_SHIFT 16 |
134 | #define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT) | 150 | #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) |
135 | 151 | ||
136 | #define CHA_ID_SNW8_SHIFT 20 | 152 | #define CHA_ID_LS_SNW8_SHIFT 20 |
137 | #define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT) | 153 | #define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT) |
138 | 154 | ||
139 | #define CHA_ID_KAS_SHIFT 24 | 155 | #define CHA_ID_LS_KAS_SHIFT 24 |
140 | #define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT) | 156 | #define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT) |
141 | 157 | ||
142 | #define CHA_ID_PK_SHIFT 28 | 158 | #define CHA_ID_LS_PK_SHIFT 28 |
143 | #define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT) | 159 | #define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT) |
144 | 160 | ||
145 | #define CHA_ID_CRC_SHIFT 32 | 161 | #define CHA_ID_MS_CRC_SHIFT 0 |
146 | #define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT) | 162 | #define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT) |
147 | 163 | ||
148 | #define CHA_ID_SNW9_SHIFT 36 | 164 | #define CHA_ID_MS_SNW9_SHIFT 4 |
149 | #define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT) | 165 | #define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT) |
150 | 166 | ||
151 | #define CHA_ID_DECO_SHIFT 56 | 167 | #define CHA_ID_MS_DECO_SHIFT 24 |
152 | #define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT) | 168 | #define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT) |
153 | 169 | ||
154 | #define CHA_ID_JR_SHIFT 60 | 170 | #define CHA_ID_MS_JR_SHIFT 28 |
155 | #define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT) | 171 | #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT) |
156 | 172 | ||
157 | struct sec_vid { | 173 | struct sec_vid { |
158 | u16 ip_id; | 174 | u16 ip_id; |
@@ -172,10 +188,14 @@ struct caam_perfmon { | |||
172 | u64 rsvd[13]; | 188 | u64 rsvd[13]; |
173 | 189 | ||
174 | /* CAAM Hardware Instantiation Parameters fa0-fbf */ | 190 | /* CAAM Hardware Instantiation Parameters fa0-fbf */ |
175 | u64 cha_rev; /* CRNR - CHA Revision Number */ | 191 | u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/ |
176 | #define CTPR_QI_SHIFT 57 | 192 | u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ |
177 | #define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT) | 193 | #define CTPR_MS_QI_SHIFT 25 |
178 | u64 comp_parms; /* CTPR - Compile Parameters Register */ | 194 | #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) |
195 | #define CTPR_MS_VIRT_EN_INCL 0x00000001 | ||
196 | #define CTPR_MS_VIRT_EN_POR 0x00000002 | ||
197 | u32 comp_parms_ms; /* CTPR - Compile Parameters Register */ | ||
198 | u32 comp_parms_ls; /* CTPR - Compile Parameters Register */ | ||
179 | u64 rsvd1[2]; | 199 | u64 rsvd1[2]; |
180 | 200 | ||
181 | /* CAAM Global Status fc0-fdf */ | 201 | /* CAAM Global Status fc0-fdf */ |
@@ -189,9 +209,12 @@ struct caam_perfmon { | |||
189 | /* Component Instantiation Parameters fe0-fff */ | 209 | /* Component Instantiation Parameters fe0-fff */ |
190 | u32 rtic_id; /* RVID - RTIC Version ID */ | 210 | u32 rtic_id; /* RVID - RTIC Version ID */ |
191 | u32 ccb_id; /* CCBVID - CCB Version ID */ | 211 | u32 ccb_id; /* CCBVID - CCB Version ID */ |
192 | u64 cha_id; /* CHAVID - CHA Version ID */ | 212 | u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/ |
193 | u64 cha_num; /* CHANUM - CHA Number */ | 213 | u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/ |
194 | u64 caam_id; /* CAAMVID - CAAM Version ID */ | 214 | u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */ |
215 | u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/ | ||
216 | u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */ | ||
217 | u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */ | ||
195 | }; | 218 | }; |
196 | 219 | ||
197 | /* LIODN programming for DMA configuration */ | 220 | /* LIODN programming for DMA configuration */ |
@@ -304,9 +327,12 @@ struct caam_ctrl { | |||
304 | /* Bus Access Configuration Section 010-11f */ | 327 | /* Bus Access Configuration Section 010-11f */ |
305 | /* Read/Writable */ | 328 | /* Read/Writable */ |
306 | struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */ | 329 | struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */ |
307 | u32 rsvd3[12]; | 330 | u32 rsvd3[11]; |
331 | u32 jrstart; /* JRSTART - Job Ring Start Register */ | ||
308 | struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */ | 332 | struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */ |
309 | u32 rsvd4[7]; | 333 | u32 rsvd4[5]; |
334 | u32 deco_rsr; /* DECORSR - Deco Request Source */ | ||
335 | u32 rsvd11; | ||
310 | u32 deco_rq; /* DECORR - DECO Request */ | 336 | u32 deco_rq; /* DECORR - DECO Request */ |
311 | struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ | 337 | struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ |
312 | u32 rsvd5[22]; | 338 | u32 rsvd5[22]; |
@@ -347,7 +373,10 @@ struct caam_ctrl { | |||
347 | #define MCFGR_DMA_RESET 0x10000000 | 373 | #define MCFGR_DMA_RESET 0x10000000 |
348 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ | 374 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ |
349 | #define SCFGR_RDBENABLE 0x00000400 | 375 | #define SCFGR_RDBENABLE 0x00000400 |
376 | #define SCFGR_VIRT_EN 0x00008000 | ||
350 | #define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ | 377 | #define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ |
378 | #define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */ | ||
379 | #define DECORSR_VALID 0x80000000 | ||
351 | #define DECORR_DEN0 0x00010000 /* DECO0 available for access*/ | 380 | #define DECORR_DEN0 0x00010000 /* DECO0 available for access*/ |
352 | 381 | ||
353 | /* AXI read cache control */ | 382 | /* AXI read cache control */ |
@@ -365,6 +394,12 @@ struct caam_ctrl { | |||
365 | #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ | 394 | #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ |
366 | #define MCFGR_BURST_64 0x00000001 /* Max burst size */ | 395 | #define MCFGR_BURST_64 0x00000001 /* Max burst size */ |
367 | 396 | ||
397 | /* JRSTART register offsets */ | ||
398 | #define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */ | ||
399 | #define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */ | ||
400 | #define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */ | ||
401 | #define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */ | ||
402 | |||
368 | /* | 403 | /* |
369 | * caam_job_ring - direct job ring setup | 404 | * caam_job_ring - direct job ring setup |
370 | * 1-4 possible per instantiation, base + 1000/2000/3000/4000 | 405 | * 1-4 possible per instantiation, base + 1000/2000/3000/4000 |
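
Editor's note: the regs.h changes split each 64-bit perfmon register into explicit _ms/_ls halves and add a little-endian variant of rd_reg64()/wr_reg64(), because on 32-bit kernels a 64-bit MMIO register must be accessed as two 32-bit words, and which word holds the high half depends on endianness. A standalone, compilable illustration of the two compositions (an array stands in for the MMIO word pair; names are mine, not the driver's):

#include <stdint.h>
#include <stdio.h>

static uint64_t rd64_be(const uint32_t *regs)	/* big-endian layout */
{
	return ((uint64_t)regs[0] << 32) | regs[1];
}

static uint64_t rd64_le(const uint32_t *regs)	/* little-endian layout */
{
	return ((uint64_t)regs[1] << 32) | regs[0];
}

int main(void)
{
	uint32_t regs[2] = { 0x11223344, 0x55667788 };

	printf("BE: %016llx  LE: %016llx\n",
	       (unsigned long long)rd64_be(regs),
	       (unsigned long long)rd64_le(regs));
	return 0;
}
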
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index d3505a018720..7f592d8d07bb 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile | |||
@@ -1,6 +1,11 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o | 1 | obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o |
2 | ccp-objs := ccp-dev.o ccp-ops.o | 2 | ccp-objs := ccp-dev.o ccp-ops.o |
3 | ifdef CONFIG_X86 | ||
3 | ccp-objs += ccp-pci.o | 4 | ccp-objs += ccp-pci.o |
5 | endif | ||
6 | ifdef CONFIG_ARM64 | ||
7 | ccp-objs += ccp-platform.o | ||
8 | endif | ||
4 | 9 | ||
5 | obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o | 10 | obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o |
6 | ccp-crypto-objs := ccp-crypto-main.o \ | 11 | ccp-crypto-objs := ccp-crypto-main.o \ |
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 2c7816149b01..a7d110652a74 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
@@ -20,7 +20,9 @@ | |||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/hw_random.h> | 21 | #include <linux/hw_random.h> |
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #ifdef CONFIG_X86 | ||
23 | #include <asm/cpu_device_id.h> | 24 | #include <asm/cpu_device_id.h> |
25 | #endif | ||
24 | #include <linux/ccp.h> | 26 | #include <linux/ccp.h> |
25 | 27 | ||
26 | #include "ccp-dev.h" | 28 | #include "ccp-dev.h" |
@@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp) | |||
360 | /* Build queue interrupt mask (two interrupts per queue) */ | 362 | /* Build queue interrupt mask (two interrupts per queue) */ |
361 | qim |= cmd_q->int_ok | cmd_q->int_err; | 363 | qim |= cmd_q->int_ok | cmd_q->int_err; |
362 | 364 | ||
365 | #ifdef CONFIG_ARM64 | ||
366 | /* For arm64 set the recommended queue cache settings */ | ||
367 | iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + | ||
368 | (CMD_Q_CACHE_INC * i)); | ||
369 | #endif | ||
370 | |||
363 | dev_dbg(dev, "queue #%u available\n", i); | 371 | dev_dbg(dev, "queue #%u available\n", i); |
364 | } | 372 | } |
365 | if (ccp->cmd_q_count == 0) { | 373 | if (ccp->cmd_q_count == 0) { |
@@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp) | |||
558 | } | 566 | } |
559 | #endif | 567 | #endif |
560 | 568 | ||
569 | #ifdef CONFIG_X86 | ||
561 | static const struct x86_cpu_id ccp_support[] = { | 570 | static const struct x86_cpu_id ccp_support[] = { |
562 | { X86_VENDOR_AMD, 22, }, | 571 | { X86_VENDOR_AMD, 22, }, |
563 | }; | 572 | }; |
573 | #endif | ||
564 | 574 | ||
565 | static int __init ccp_mod_init(void) | 575 | static int __init ccp_mod_init(void) |
566 | { | 576 | { |
577 | #ifdef CONFIG_X86 | ||
567 | struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; | 578 | struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; |
568 | int ret; | 579 | int ret; |
569 | 580 | ||
@@ -589,12 +600,30 @@ static int __init ccp_mod_init(void) | |||
589 | 600 | ||
590 | break; | 601 | break; |
591 | } | 602 | } |
603 | #endif | ||
604 | |||
605 | #ifdef CONFIG_ARM64 | ||
606 | int ret; | ||
607 | |||
608 | ret = ccp_platform_init(); | ||
609 | if (ret) | ||
610 | return ret; | ||
611 | |||
612 | /* Don't leave the driver loaded if init failed */ | ||
613 | if (!ccp_get_device()) { | ||
614 | ccp_platform_exit(); | ||
615 | return -ENODEV; | ||
616 | } | ||
617 | |||
618 | return 0; | ||
619 | #endif | ||
592 | 620 | ||
593 | return -ENODEV; | 621 | return -ENODEV; |
594 | } | 622 | } |
595 | 623 | ||
596 | static void __exit ccp_mod_exit(void) | 624 | static void __exit ccp_mod_exit(void) |
597 | { | 625 | { |
626 | #ifdef CONFIG_X86 | ||
598 | struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; | 627 | struct cpuinfo_x86 *cpuinfo = &boot_cpu_data; |
599 | 628 | ||
600 | switch (cpuinfo->x86) { | 629 | switch (cpuinfo->x86) { |
@@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void) | |||
602 | ccp_pci_exit(); | 631 | ccp_pci_exit(); |
603 | break; | 632 | break; |
604 | } | 633 | } |
634 | #endif | ||
635 | |||
636 | #ifdef CONFIG_ARM64 | ||
637 | ccp_platform_exit(); | ||
638 | #endif | ||
605 | } | 639 | } |
606 | 640 | ||
607 | module_init(ccp_mod_init); | 641 | module_init(ccp_mod_init); |
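
Editor's note: ccp_mod_init()/ccp_mod_exit() now compile one of two bodies — the x86 path gates on the CPU family and registers the PCI driver, the arm64 path registers the platform driver — and each refuses to leave the module loaded when no device actually probed. A condensed sketch of the dispatch, assuming the same Kconfig symbols (the real x86 path also checks cpuinfo->x86, omitted here):

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	int ret = ccp_pci_init();

	if (ret)
		return ret;
	if (!ccp_get_device()) {	/* don't stay loaded with no device */
		ccp_pci_exit();
		return -ENODEV;
	}
	return 0;
#elif defined(CONFIG_ARM64)
	int ret = ccp_platform_init();

	if (ret)
		return ret;
	if (!ccp_get_device()) {
		ccp_platform_exit();
		return -ENODEV;
	}
	return 0;
#else
	return -ENODEV;
#endif
}
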
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 7ec536e702ec..62ff35a6b9ec 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h | |||
@@ -23,8 +23,6 @@ | |||
23 | #include <linux/hw_random.h> | 23 | #include <linux/hw_random.h> |
24 | 24 | ||
25 | 25 | ||
26 | #define IO_OFFSET 0x20000 | ||
27 | |||
28 | #define MAX_DMAPOOL_NAME_LEN 32 | 26 | #define MAX_DMAPOOL_NAME_LEN 32 |
29 | 27 | ||
30 | #define MAX_HW_QUEUES 5 | 28 | #define MAX_HW_QUEUES 5 |
@@ -32,6 +30,9 @@ | |||
32 | 30 | ||
33 | #define TRNG_RETRIES 10 | 31 | #define TRNG_RETRIES 10 |
34 | 32 | ||
33 | #define CACHE_NONE 0x00 | ||
34 | #define CACHE_WB_NO_ALLOC 0xb7 | ||
35 | |||
35 | 36 | ||
36 | /****** Register Mappings ******/ | 37 | /****** Register Mappings ******/ |
37 | #define Q_MASK_REG 0x000 | 38 | #define Q_MASK_REG 0x000 |
@@ -50,7 +51,7 @@ | |||
50 | #define CMD_Q_INT_STATUS_BASE 0x214 | 51 | #define CMD_Q_INT_STATUS_BASE 0x214 |
51 | #define CMD_Q_STATUS_INCR 0x20 | 52 | #define CMD_Q_STATUS_INCR 0x20 |
52 | 53 | ||
53 | #define CMD_Q_CACHE 0x228 | 54 | #define CMD_Q_CACHE_BASE 0x228 |
54 | #define CMD_Q_CACHE_INC 0x20 | 55 | #define CMD_Q_CACHE_INC 0x20 |
55 | 56 | ||
56 | #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f); | 57 | #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f); |
@@ -194,6 +195,7 @@ struct ccp_device { | |||
194 | void *dev_specific; | 195 | void *dev_specific; |
195 | int (*get_irq)(struct ccp_device *ccp); | 196 | int (*get_irq)(struct ccp_device *ccp); |
196 | void (*free_irq)(struct ccp_device *ccp); | 197 | void (*free_irq)(struct ccp_device *ccp); |
198 | unsigned int irq; | ||
197 | 199 | ||
198 | /* | 200 | /* |
199 | * I/O area used for device communication. The register mapping | 201 | * I/O area used for device communication. The register mapping |
@@ -254,12 +256,18 @@ struct ccp_device { | |||
254 | /* Suspend support */ | 256 | /* Suspend support */ |
255 | unsigned int suspending; | 257 | unsigned int suspending; |
256 | wait_queue_head_t suspend_queue; | 258 | wait_queue_head_t suspend_queue; |
259 | |||
260 | /* DMA caching attribute support */ | ||
261 | unsigned int axcache; | ||
257 | }; | 262 | }; |
258 | 263 | ||
259 | 264 | ||
260 | int ccp_pci_init(void); | 265 | int ccp_pci_init(void); |
261 | void ccp_pci_exit(void); | 266 | void ccp_pci_exit(void); |
262 | 267 | ||
268 | int ccp_platform_init(void); | ||
269 | void ccp_platform_exit(void); | ||
270 | |||
263 | struct ccp_device *ccp_alloc_struct(struct device *dev); | 271 | struct ccp_device *ccp_alloc_struct(struct device *dev); |
264 | int ccp_init(struct ccp_device *ccp); | 272 | int ccp_init(struct ccp_device *ccp); |
265 | void ccp_destroy(struct ccp_device *ccp); | 273 | void ccp_destroy(struct ccp_device *ccp); |
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 9ae006d69df4..8729364261d7 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c | |||
@@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1606 | goto e_ksb; | 1606 | goto e_ksb; |
1607 | 1607 | ||
1608 | ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES, | 1608 | ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES, |
1609 | true); | 1609 | false); |
1610 | ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, | 1610 | ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, |
1611 | CCP_PASSTHRU_BYTESWAP_NOOP); | 1611 | CCP_PASSTHRU_BYTESWAP_NOOP); |
1612 | if (ret) { | 1612 | if (ret) { |
@@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1623 | goto e_exp; | 1623 | goto e_exp; |
1624 | 1624 | ||
1625 | ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES, | 1625 | ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES, |
1626 | true); | 1626 | false); |
1627 | src.address += o_len; /* Adjust the address for the copy operation */ | 1627 | src.address += o_len; /* Adjust the address for the copy operation */ |
1628 | ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES, | 1628 | ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES, |
1629 | true); | 1629 | false); |
1630 | src.address -= o_len; /* Reset the address to original value */ | 1630 | src.address -= o_len; /* Reset the address to original value */ |
1631 | 1631 | ||
1632 | /* Prepare the output area for the operation */ | 1632 | /* Prepare the output area for the operation */ |
@@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1841 | 1841 | ||
1842 | /* Copy the ECC modulus */ | 1842 | /* Copy the ECC modulus */ |
1843 | ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, | 1843 | ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, |
1844 | CCP_ECC_OPERAND_SIZE, true); | 1844 | CCP_ECC_OPERAND_SIZE, false); |
1845 | src.address += CCP_ECC_OPERAND_SIZE; | 1845 | src.address += CCP_ECC_OPERAND_SIZE; |
1846 | 1846 | ||
1847 | /* Copy the first operand */ | 1847 | /* Copy the first operand */ |
1848 | ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1, | 1848 | ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1, |
1849 | ecc->u.mm.operand_1_len, | 1849 | ecc->u.mm.operand_1_len, |
1850 | CCP_ECC_OPERAND_SIZE, true); | 1850 | CCP_ECC_OPERAND_SIZE, false); |
1851 | src.address += CCP_ECC_OPERAND_SIZE; | 1851 | src.address += CCP_ECC_OPERAND_SIZE; |
1852 | 1852 | ||
1853 | if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { | 1853 | if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { |
1854 | /* Copy the second operand */ | 1854 | /* Copy the second operand */ |
1855 | ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2, | 1855 | ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2, |
1856 | ecc->u.mm.operand_2_len, | 1856 | ecc->u.mm.operand_2_len, |
1857 | CCP_ECC_OPERAND_SIZE, true); | 1857 | CCP_ECC_OPERAND_SIZE, false); |
1858 | src.address += CCP_ECC_OPERAND_SIZE; | 1858 | src.address += CCP_ECC_OPERAND_SIZE; |
1859 | } | 1859 | } |
1860 | 1860 | ||
@@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1960 | 1960 | ||
1961 | /* Copy the ECC modulus */ | 1961 | /* Copy the ECC modulus */ |
1962 | ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, | 1962 | ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, |
1963 | CCP_ECC_OPERAND_SIZE, true); | 1963 | CCP_ECC_OPERAND_SIZE, false); |
1964 | src.address += CCP_ECC_OPERAND_SIZE; | 1964 | src.address += CCP_ECC_OPERAND_SIZE; |
1965 | 1965 | ||
1966 | /* Copy the first point X and Y coordinate */ | 1966 | /* Copy the first point X and Y coordinate */ |
1967 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x, | 1967 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x, |
1968 | ecc->u.pm.point_1.x_len, | 1968 | ecc->u.pm.point_1.x_len, |
1969 | CCP_ECC_OPERAND_SIZE, true); | 1969 | CCP_ECC_OPERAND_SIZE, false); |
1970 | src.address += CCP_ECC_OPERAND_SIZE; | 1970 | src.address += CCP_ECC_OPERAND_SIZE; |
1971 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, | 1971 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, |
1972 | ecc->u.pm.point_1.y_len, | 1972 | ecc->u.pm.point_1.y_len, |
1973 | CCP_ECC_OPERAND_SIZE, true); | 1973 | CCP_ECC_OPERAND_SIZE, false); |
1974 | src.address += CCP_ECC_OPERAND_SIZE; | 1974 | src.address += CCP_ECC_OPERAND_SIZE; |
1975 | 1975 | ||
1976 | /* Set the first point Z coordinate to 1 */ | 1976 | /* Set the first point Z coordinate to 1 */ |
@@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1981 | /* Copy the second point X and Y coordinate */ | 1981 | /* Copy the second point X and Y coordinate */ |
1982 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, | 1982 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, |
1983 | ecc->u.pm.point_2.x_len, | 1983 | ecc->u.pm.point_2.x_len, |
1984 | CCP_ECC_OPERAND_SIZE, true); | 1984 | CCP_ECC_OPERAND_SIZE, false); |
1985 | src.address += CCP_ECC_OPERAND_SIZE; | 1985 | src.address += CCP_ECC_OPERAND_SIZE; |
1986 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, | 1986 | ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, |
1987 | ecc->u.pm.point_2.y_len, | 1987 | ecc->u.pm.point_2.y_len, |
1988 | CCP_ECC_OPERAND_SIZE, true); | 1988 | CCP_ECC_OPERAND_SIZE, false); |
1989 | src.address += CCP_ECC_OPERAND_SIZE; | 1989 | src.address += CCP_ECC_OPERAND_SIZE; |
1990 | 1990 | ||
1991 | /* Set the second point Z coordianate to 1 */ | 1991 | /* Set the second point Z coordianate to 1 */ |
@@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1995 | /* Copy the Domain "a" parameter */ | 1995 | /* Copy the Domain "a" parameter */ |
1996 | ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, | 1996 | ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, |
1997 | ecc->u.pm.domain_a_len, | 1997 | ecc->u.pm.domain_a_len, |
1998 | CCP_ECC_OPERAND_SIZE, true); | 1998 | CCP_ECC_OPERAND_SIZE, false); |
1999 | src.address += CCP_ECC_OPERAND_SIZE; | 1999 | src.address += CCP_ECC_OPERAND_SIZE; |
2000 | 2000 | ||
2001 | if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { | 2001 | if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { |
2002 | /* Copy the scalar value */ | 2002 | /* Copy the scalar value */ |
2003 | ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar, | 2003 | ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar, |
2004 | ecc->u.pm.scalar_len, | 2004 | ecc->u.pm.scalar_len, |
2005 | CCP_ECC_OPERAND_SIZE, true); | 2005 | CCP_ECC_OPERAND_SIZE, false); |
2006 | src.address += CCP_ECC_OPERAND_SIZE; | 2006 | src.address += CCP_ECC_OPERAND_SIZE; |
2007 | } | 2007 | } |
2008 | } | 2008 | } |
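
Editor's note: every ccp_reverse_set_dm_area() call in the RSA and ECC paths flips its final flag from true to false. Judging from the call sites, that flag appears to select sign extension of the reversed operand up to the fixed operand size; passing false zero-pads instead, which is the correct treatment for unsigned big-number operands. A standalone sketch of a reverse-copy with zero padding (illustrative only, not the driver's helper):

#include <stdint.h>
#include <string.h>

/* Copy 'len' big-endian bytes into a fixed-size little-endian operand
 * buffer, zero-padding (never sign-extending) the high bytes - the
 * behaviour selected by passing 'false' above. */
static void reverse_zero_pad(uint8_t *dst, size_t dst_len,
			     const uint8_t *src, size_t len)
{
	size_t i;

	memset(dst, 0, dst_len);	/* zero pad, never sign extend */
	for (i = 0; i < len && i < dst_len; i++)
		dst[i] = src[len - 1 - i];
}
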
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 0d746236df5e..180cc87b4dbb 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c | |||
@@ -12,8 +12,10 @@ | |||
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/device.h> | ||
15 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
16 | #include <linux/pci_ids.h> | 17 | #include <linux/pci_ids.h> |
18 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/kthread.h> | 19 | #include <linux/kthread.h> |
18 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
19 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
@@ -24,6 +26,8 @@ | |||
24 | #include "ccp-dev.h" | 26 | #include "ccp-dev.h" |
25 | 27 | ||
26 | #define IO_BAR 2 | 28 | #define IO_BAR 2 |
29 | #define IO_OFFSET 0x20000 | ||
30 | |||
27 | #define MSIX_VECTORS 2 | 31 | #define MSIX_VECTORS 2 |
28 | 32 | ||
29 | struct ccp_msix { | 33 | struct ccp_msix { |
@@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp) | |||
89 | if (ret) | 93 | if (ret) |
90 | return ret; | 94 | return ret; |
91 | 95 | ||
92 | ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev); | 96 | ccp->irq = pdev->irq; |
97 | ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); | ||
93 | if (ret) { | 98 | if (ret) { |
94 | dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); | 99 | dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); |
95 | goto e_msi; | 100 | goto e_msi; |
@@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) | |||
136 | dev); | 141 | dev); |
137 | pci_disable_msix(pdev); | 142 | pci_disable_msix(pdev); |
138 | } else { | 143 | } else { |
139 | free_irq(pdev->irq, dev); | 144 | free_irq(ccp->irq, dev); |
140 | pci_disable_msi(pdev); | 145 | pci_disable_msi(pdev); |
141 | } | 146 | } |
142 | } | 147 | } |
@@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp) | |||
147 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | 152 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); |
148 | resource_size_t io_len; | 153 | resource_size_t io_len; |
149 | unsigned long io_flags; | 154 | unsigned long io_flags; |
150 | int bar; | ||
151 | 155 | ||
152 | io_flags = pci_resource_flags(pdev, IO_BAR); | 156 | io_flags = pci_resource_flags(pdev, IO_BAR); |
153 | io_len = pci_resource_len(pdev, IO_BAR); | 157 | io_len = pci_resource_len(pdev, IO_BAR); |
154 | if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) | 158 | if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) |
155 | return IO_BAR; | 159 | return IO_BAR; |
156 | 160 | ||
157 | for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) { | ||
158 | io_flags = pci_resource_flags(pdev, bar); | ||
159 | io_len = pci_resource_len(pdev, bar); | ||
160 | if ((io_flags & IORESOURCE_MEM) && | ||
161 | (io_len >= (IO_OFFSET + 0x800))) | ||
162 | return bar; | ||
163 | } | ||
164 | |||
165 | return -EIO; | 161 | return -EIO; |
166 | } | 162 | } |
167 | 163 | ||
@@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
214 | } | 210 | } |
215 | ccp->io_regs = ccp->io_map + IO_OFFSET; | 211 | ccp->io_regs = ccp->io_map + IO_OFFSET; |
216 | 212 | ||
217 | ret = dma_set_mask(dev, DMA_BIT_MASK(48)); | 213 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); |
218 | if (ret == 0) { | 214 | if (ret) { |
219 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48)); | 215 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
220 | if (ret) { | 216 | if (ret) { |
221 | dev_err(dev, | 217 | dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", |
222 | "pci_set_consistent_dma_mask failed (%d)\n", | ||
223 | ret); | 218 | ret); |
224 | goto e_bar0; | 219 | goto e_iomap; |
225 | } | ||
226 | } else { | ||
227 | ret = dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
228 | if (ret) { | ||
229 | dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret); | ||
230 | goto e_bar0; | ||
231 | } | 220 | } |
232 | } | 221 | } |
233 | 222 | ||
@@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
235 | 224 | ||
236 | ret = ccp_init(ccp); | 225 | ret = ccp_init(ccp); |
237 | if (ret) | 226 | if (ret) |
238 | goto e_bar0; | 227 | goto e_iomap; |
239 | 228 | ||
240 | dev_notice(dev, "enabled\n"); | 229 | dev_notice(dev, "enabled\n"); |
241 | 230 | ||
242 | return 0; | 231 | return 0; |
243 | 232 | ||
244 | e_bar0: | 233 | e_iomap: |
245 | pci_iounmap(pdev, ccp->io_map); | 234 | pci_iounmap(pdev, ccp->io_map); |
246 | 235 | ||
247 | e_device: | 236 | e_device: |
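
The ccp-pci.c hunk above also collapses the old two-step dma_set_mask()/dma_set_coherent_mask() sequence into dma_set_mask_and_coherent() with a 32-bit fallback. A minimal sketch of that pattern in isolation, assuming only a bound struct device:

    #include <linux/dma-mapping.h>

    /* Sketch: prefer the device's native 48-bit DMA mask and fall
     * back to 32 bits, mirroring the probe logic above. */
    static int example_set_dma_masks(struct device *dev)
    {
            int ret;

            ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
            if (ret)
                    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
            return ret;
    }
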
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c new file mode 100644 index 000000000000..b0a2806908f1 --- /dev/null +++ b/drivers/crypto/ccp/ccp-platform.c | |||
@@ -0,0 +1,230 @@ | |||
1 | /* | ||
2 | * AMD Cryptographic Coprocessor (CCP) driver | ||
3 | * | ||
4 | * Copyright (C) 2014 Advanced Micro Devices, Inc. | ||
5 | * | ||
6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/kthread.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/ccp.h> | ||
25 | #include <linux/of.h> | ||
26 | |||
27 | #include "ccp-dev.h" | ||
28 | |||
29 | |||
30 | static int ccp_get_irq(struct ccp_device *ccp) | ||
31 | { | ||
32 | struct device *dev = ccp->dev; | ||
33 | struct platform_device *pdev = container_of(dev, | ||
34 | struct platform_device, dev); | ||
35 | int ret; | ||
36 | |||
37 | ret = platform_get_irq(pdev, 0); | ||
38 | if (ret < 0) | ||
39 | return ret; | ||
40 | |||
41 | ccp->irq = ret; | ||
42 | ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); | ||
43 | if (ret) { | ||
44 | dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); | ||
45 | return ret; | ||
46 | } | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | static int ccp_get_irqs(struct ccp_device *ccp) | ||
52 | { | ||
53 | struct device *dev = ccp->dev; | ||
54 | int ret; | ||
55 | |||
56 | ret = ccp_get_irq(ccp); | ||
57 | if (!ret) | ||
58 | return 0; | ||
59 | |||
60 | /* Couldn't get an interrupt */ | ||
61 | dev_notice(dev, "could not enable interrupts (%d)\n", ret); | ||
62 | |||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | static void ccp_free_irqs(struct ccp_device *ccp) | ||
67 | { | ||
68 | struct device *dev = ccp->dev; | ||
69 | |||
70 | free_irq(ccp->irq, dev); | ||
71 | } | ||
72 | |||
73 | static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) | ||
74 | { | ||
75 | struct device *dev = ccp->dev; | ||
76 | struct platform_device *pdev = container_of(dev, | ||
77 | struct platform_device, dev); | ||
78 | struct resource *ior; | ||
79 | |||
80 | ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
81 | if (ior && (resource_size(ior) >= 0x800)) | ||
82 | return ior; | ||
83 | |||
84 | return NULL; | ||
85 | } | ||
86 | |||
87 | static int ccp_platform_probe(struct platform_device *pdev) | ||
88 | { | ||
89 | struct ccp_device *ccp; | ||
90 | struct device *dev = &pdev->dev; | ||
91 | struct resource *ior; | ||
92 | int ret; | ||
93 | |||
94 | ret = -ENOMEM; | ||
95 | ccp = ccp_alloc_struct(dev); | ||
96 | if (!ccp) | ||
97 | goto e_err; | ||
98 | |||
99 | ccp->dev_specific = NULL; | ||
100 | ccp->get_irq = ccp_get_irqs; | ||
101 | ccp->free_irq = ccp_free_irqs; | ||
102 | |||
103 | ior = ccp_find_mmio_area(ccp); | ||
104 | ccp->io_map = devm_ioremap_resource(dev, ior); | ||
105 | if (IS_ERR(ccp->io_map)) { | ||
106 | ret = PTR_ERR(ccp->io_map); | ||
107 | goto e_free; | ||
108 | } | ||
109 | ccp->io_regs = ccp->io_map; | ||
110 | |||
111 | if (!dev->dma_mask) | ||
112 | dev->dma_mask = &dev->coherent_dma_mask; | ||
113 | *(dev->dma_mask) = DMA_BIT_MASK(48); | ||
114 | dev->coherent_dma_mask = DMA_BIT_MASK(48); | ||
115 | |||
116 | if (of_property_read_bool(dev->of_node, "dma-coherent")) | ||
117 | ccp->axcache = CACHE_WB_NO_ALLOC; | ||
118 | else | ||
119 | ccp->axcache = CACHE_NONE; | ||
120 | |||
121 | dev_set_drvdata(dev, ccp); | ||
122 | |||
123 | ret = ccp_init(ccp); | ||
124 | if (ret) | ||
125 | goto e_free; | ||
126 | |||
127 | dev_notice(dev, "enabled\n"); | ||
128 | |||
129 | return 0; | ||
130 | |||
131 | e_free: | ||
132 | kfree(ccp); | ||
133 | |||
134 | e_err: | ||
135 | dev_notice(dev, "initialization failed\n"); | ||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static int ccp_platform_remove(struct platform_device *pdev) | ||
140 | { | ||
141 | struct device *dev = &pdev->dev; | ||
142 | struct ccp_device *ccp = dev_get_drvdata(dev); | ||
143 | |||
144 | ccp_destroy(ccp); | ||
145 | |||
146 | kfree(ccp); | ||
147 | |||
148 | dev_notice(dev, "disabled\n"); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | #ifdef CONFIG_PM | ||
154 | static int ccp_platform_suspend(struct platform_device *pdev, | ||
155 | pm_message_t state) | ||
156 | { | ||
157 | struct device *dev = &pdev->dev; | ||
158 | struct ccp_device *ccp = dev_get_drvdata(dev); | ||
159 | unsigned long flags; | ||
160 | unsigned int i; | ||
161 | |||
162 | spin_lock_irqsave(&ccp->cmd_lock, flags); | ||
163 | |||
164 | ccp->suspending = 1; | ||
165 | |||
166 | /* Wake all the queue kthreads to prepare for suspend */ | ||
167 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
168 | wake_up_process(ccp->cmd_q[i].kthread); | ||
169 | |||
170 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | ||
171 | |||
172 | /* Wait for all queue kthreads to say they're done */ | ||
173 | while (!ccp_queues_suspended(ccp)) | ||
174 | wait_event_interruptible(ccp->suspend_queue, | ||
175 | ccp_queues_suspended(ccp)); | ||
176 | |||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | static int ccp_platform_resume(struct platform_device *pdev) | ||
181 | { | ||
182 | struct device *dev = &pdev->dev; | ||
183 | struct ccp_device *ccp = dev_get_drvdata(dev); | ||
184 | unsigned long flags; | ||
185 | unsigned int i; | ||
186 | |||
187 | spin_lock_irqsave(&ccp->cmd_lock, flags); | ||
188 | |||
189 | ccp->suspending = 0; | ||
190 | |||
191 | /* Wake up all the kthreads */ | ||
192 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
193 | ccp->cmd_q[i].suspended = 0; | ||
194 | wake_up_process(ccp->cmd_q[i].kthread); | ||
195 | } | ||
196 | |||
197 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | #endif | ||
202 | |||
203 | static const struct of_device_id ccp_platform_ids[] = { | ||
204 | { .compatible = "amd,ccp-seattle-v1a" }, | ||
205 | { }, | ||
206 | }; | ||
207 | |||
208 | static struct platform_driver ccp_platform_driver = { | ||
209 | .driver = { | ||
210 | .name = "AMD Cryptographic Coprocessor", | ||
211 | .owner = THIS_MODULE, | ||
212 | .of_match_table = ccp_platform_ids, | ||
213 | }, | ||
214 | .probe = ccp_platform_probe, | ||
215 | .remove = ccp_platform_remove, | ||
216 | #ifdef CONFIG_PM | ||
217 | .suspend = ccp_platform_suspend, | ||
218 | .resume = ccp_platform_resume, | ||
219 | #endif | ||
220 | }; | ||
221 | |||
222 | int ccp_platform_init(void) | ||
223 | { | ||
224 | return platform_driver_register(&ccp_platform_driver); | ||
225 | } | ||
226 | |||
227 | void ccp_platform_exit(void) | ||
228 | { | ||
229 | platform_driver_unregister(&ccp_platform_driver); | ||
230 | } | ||
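
ccp_platform_init()/ccp_platform_exit() mirror the existing PCI entry points, so the shared module init in ccp-dev.c presumably dispatches to one or the other depending on the build. A hypothetical sketch of such glue; the ccp_pci_init()/ccp_pci_exit() names are assumptions, not part of this patch:

    /* Hypothetical module glue; ccp_pci_init()/ccp_pci_exit() are
     * assumed counterparts to the platform entry points above. */
    static int __init ccp_mod_init(void)
    {
    #ifdef CONFIG_X86
            return ccp_pci_init();
    #else
            return ccp_platform_init();
    #endif
    }

    static void __exit ccp_mod_exit(void)
    {
    #ifdef CONFIG_X86
            ccp_pci_exit();
    #else
            ccp_platform_exit();
    #endif
    }
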
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 502edf0a2933..544f6d327ede 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c | |||
@@ -1247,7 +1247,7 @@ static struct vio_device_id nx842_driver_ids[] = { | |||
1247 | static struct vio_driver nx842_driver = { | 1247 | static struct vio_driver nx842_driver = { |
1248 | .name = MODULE_NAME, | 1248 | .name = MODULE_NAME, |
1249 | .probe = nx842_probe, | 1249 | .probe = nx842_probe, |
1250 | .remove = nx842_remove, | 1250 | .remove = __exit_p(nx842_remove), |
1251 | .get_desired_dma = nx842_get_desired_dma, | 1251 | .get_desired_dma = nx842_get_desired_dma, |
1252 | .id_table = nx842_driver_ids, | 1252 | .id_table = nx842_driver_ids, |
1253 | }; | 1253 | }; |
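
The nx-842 change above wraps the remove callback in __exit_p() so the callback can live in the __exit section. For built-in drivers that section is discarded, so the macro must degrade to NULL; paraphrasing the definition in include/linux/init.h:

    /* Paraphrased from include/linux/init.h */
    #ifdef MODULE
    #define __exit_p(x) x    /* module build: exit code is kept */
    #else
    #define __exit_p(x) NULL /* built-in: __exit section is dropped */
    #endif
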
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig new file mode 100644 index 000000000000..49bede2a9f77 --- /dev/null +++ b/drivers/crypto/qat/Kconfig | |||
@@ -0,0 +1,23 @@ | |||
1 | config CRYPTO_DEV_QAT | ||
2 | tristate | ||
3 | select CRYPTO_AEAD | ||
4 | select CRYPTO_AUTHENC | ||
5 | select CRYPTO_ALGAPI | ||
6 | select CRYPTO_AES | ||
7 | select CRYPTO_CBC | ||
8 | select CRYPTO_SHA1 | ||
9 | select CRYPTO_SHA256 | ||
10 | select CRYPTO_SHA512 | ||
11 | select FW_LOADER | ||
12 | |||
13 | config CRYPTO_DEV_QAT_DH895xCC | ||
14 | tristate "Support for Intel(R) DH895xCC" | ||
15 | depends on X86 && PCI | ||
16 | default n | ||
17 | select CRYPTO_DEV_QAT | ||
18 | help | ||
19 | Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology | ||
20 | for accelerating crypto and compression workloads. | ||
21 | |||
22 | To compile this as a module, choose M here: the module | ||
23 | will be called qat_dh895xcc. | ||
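
Since CRYPTO_DEV_QAT is a hidden tristate selected by the device option, enabling the driver as a module needs only the visible symbol; the resulting .config fragment looks like:

    CONFIG_CRYPTO_DEV_QAT=m
    CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
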
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile new file mode 100644 index 000000000000..d11481be225e --- /dev/null +++ b/drivers/crypto/qat/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ | ||
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ | ||
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile new file mode 100644 index 000000000000..e0424dc382fe --- /dev/null +++ b/drivers/crypto/qat/qat_common/Makefile | |||
@@ -0,0 +1,14 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o | ||
2 | intel_qat-objs := adf_cfg.o \ | ||
3 | adf_ctl_drv.o \ | ||
4 | adf_dev_mgr.o \ | ||
5 | adf_init.o \ | ||
6 | adf_accel_engine.o \ | ||
7 | adf_aer.o \ | ||
8 | adf_transport.o \ | ||
9 | qat_crypto.o \ | ||
10 | qat_algs.o \ | ||
11 | qat_uclo.o \ | ||
12 | qat_hal.o | ||
13 | |||
14 | intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o | ||
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h new file mode 100644 index 000000000000..9282381b03ce --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_ACCEL_DEVICES_H_ | ||
48 | #define ADF_ACCEL_DEVICES_H_ | ||
49 | #include <linux/module.h> | ||
50 | #include <linux/atomic.h> | ||
51 | #include <linux/list.h> | ||
52 | #include <linux/proc_fs.h> | ||
53 | #include <linux/io.h> | ||
54 | #include "adf_cfg_common.h" | ||
55 | |||
56 | #define PCI_VENDOR_ID_INTEL 0x8086 | ||
57 | #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" | ||
58 | #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 | ||
59 | #define ADF_DH895XCC_PMISC_BAR 1 | ||
60 | #define ADF_DH895XCC_ETR_BAR 2 | ||
61 | #define ADF_PCI_MAX_BARS 3 | ||
62 | #define ADF_DEVICE_NAME_LENGTH 32 | ||
63 | #define ADF_ETR_MAX_RINGS_PER_BANK 16 | ||
64 | #define ADF_MAX_MSIX_VECTOR_NAME 16 | ||
65 | #define ADF_DEVICE_NAME_PREFIX "qat_" | ||
66 | |||
67 | enum adf_accel_capabilities { | ||
68 | ADF_ACCEL_CAPABILITIES_NULL = 0, | ||
69 | ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1, | ||
70 | ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2, | ||
71 | ADF_ACCEL_CAPABILITIES_CIPHER = 4, | ||
72 | ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8, | ||
73 | ADF_ACCEL_CAPABILITIES_COMPRESSION = 32, | ||
74 | ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64, | ||
75 | ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 | ||
76 | }; | ||
77 | |||
78 | struct adf_bar { | ||
79 | resource_size_t base_addr; | ||
80 | void __iomem *virt_addr; | ||
81 | resource_size_t size; | ||
82 | } __packed; | ||
83 | |||
84 | struct adf_accel_msix { | ||
85 | struct msix_entry *entries; | ||
86 | char **names; | ||
87 | } __packed; | ||
88 | |||
89 | struct adf_accel_pci { | ||
90 | struct pci_dev *pci_dev; | ||
91 | struct adf_accel_msix msix_entries; | ||
92 | struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; | ||
93 | uint8_t revid; | ||
94 | uint8_t sku; | ||
95 | } __packed; | ||
96 | |||
97 | enum dev_state { | ||
98 | DEV_DOWN = 0, | ||
99 | DEV_UP | ||
100 | }; | ||
101 | |||
102 | enum dev_sku_info { | ||
103 | DEV_SKU_1 = 0, | ||
104 | DEV_SKU_2, | ||
105 | DEV_SKU_3, | ||
106 | DEV_SKU_4, | ||
107 | DEV_SKU_UNKNOWN, | ||
108 | }; | ||
109 | |||
110 | static inline const char *get_sku_info(enum dev_sku_info info) | ||
111 | { | ||
112 | switch (info) { | ||
113 | case DEV_SKU_1: | ||
114 | return "SKU1"; | ||
115 | case DEV_SKU_2: | ||
116 | return "SKU2"; | ||
117 | case DEV_SKU_3: | ||
118 | return "SKU3"; | ||
119 | case DEV_SKU_4: | ||
120 | return "SKU4"; | ||
121 | case DEV_SKU_UNKNOWN: | ||
122 | default: | ||
123 | break; | ||
124 | } | ||
125 | return "Unknown SKU"; | ||
126 | } | ||
127 | |||
128 | struct adf_hw_device_class { | ||
129 | const char *name; | ||
130 | const enum adf_device_type type; | ||
131 | uint32_t instances; | ||
132 | } __packed; | ||
133 | |||
134 | struct adf_cfg_device_data; | ||
135 | struct adf_accel_dev; | ||
136 | struct adf_etr_data; | ||
137 | struct adf_etr_ring_data; | ||
138 | |||
139 | struct adf_hw_device_data { | ||
140 | struct adf_hw_device_class *dev_class; | ||
141 | uint32_t (*get_accel_mask)(uint32_t fuse); | ||
142 | uint32_t (*get_ae_mask)(uint32_t fuse); | ||
143 | uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); | ||
144 | uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); | ||
145 | uint32_t (*get_num_aes)(struct adf_hw_device_data *self); | ||
146 | uint32_t (*get_num_accels)(struct adf_hw_device_data *self); | ||
147 | enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); | ||
148 | void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring); | ||
149 | void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring); | ||
150 | int (*alloc_irq)(struct adf_accel_dev *accel_dev); | ||
151 | void (*free_irq)(struct adf_accel_dev *accel_dev); | ||
152 | void (*enable_error_correction)(struct adf_accel_dev *accel_dev); | ||
153 | const char *fw_name; | ||
154 | uint32_t pci_dev_id; | ||
155 | uint32_t fuses; | ||
156 | uint32_t accel_capabilities_mask; | ||
157 | uint16_t accel_mask; | ||
158 | uint16_t ae_mask; | ||
159 | uint16_t tx_rings_mask; | ||
160 | uint8_t tx_rx_gap; | ||
161 | uint8_t instance_id; | ||
162 | uint8_t num_banks; | ||
163 | uint8_t num_accel; | ||
164 | uint8_t num_logical_accel; | ||
165 | uint8_t num_engines; | ||
166 | } __packed; | ||
167 | |||
168 | /* CSR write macro */ | ||
169 | #define ADF_CSR_WR(csr_base, csr_offset, val) \ | ||
170 | __raw_writel(val, csr_base + csr_offset) | ||
171 | |||
172 | /* CSR read macro */ | ||
173 | #define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset) | ||
174 | |||
175 | #define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev) | ||
176 | #define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars) | ||
177 | #define GET_HW_DATA(accel_dev) (accel_dev->hw_device) | ||
178 | #define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks) | ||
179 | #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) | ||
180 | #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev | ||
181 | |||
182 | struct adf_admin_comms; | ||
183 | struct icp_qat_fw_loader_handle; | ||
184 | struct adf_fw_loader_data { | ||
185 | struct icp_qat_fw_loader_handle *fw_loader; | ||
186 | const struct firmware *uof_fw; | ||
187 | }; | ||
188 | |||
189 | struct adf_accel_dev { | ||
190 | struct adf_etr_data *transport; | ||
191 | struct adf_hw_device_data *hw_device; | ||
192 | struct adf_cfg_device_data *cfg; | ||
193 | struct adf_fw_loader_data *fw_loader; | ||
194 | struct adf_admin_comms *admin; | ||
195 | struct list_head crypto_list; | ||
196 | unsigned long status; | ||
197 | atomic_t ref_count; | ||
198 | struct dentry *debugfs_dir; | ||
199 | struct list_head list; | ||
200 | struct module *owner; | ||
201 | uint8_t accel_id; | ||
202 | uint8_t numa_node; | ||
203 | struct adf_accel_pci accel_pci_dev; | ||
204 | } __packed; | ||
205 | #endif | ||
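
The ADF_CSR_* macros and GET_* accessors above are what device-specific code uses to reach BAR registers. A hedged sketch of a read-modify-write through them; EXAMPLE_BANK_OFFSET is a made-up placeholder, not a real CSR offset:

    /* Sketch only: poke a hypothetical CSR in the ETR BAR. */
    #define EXAMPLE_BANK_OFFSET 0x1000

    static void example_touch_csr(struct adf_accel_dev *accel_dev)
    {
            struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
            struct adf_bar *etr_bar =
                    &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)];
            uint32_t val;

            val = ADF_CSR_RD(etr_bar->virt_addr, EXAMPLE_BANK_OFFSET);
            ADF_CSR_WR(etr_bar->virt_addr, EXAMPLE_BANK_OFFSET, val | 0x1);
    }
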
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c new file mode 100644 index 000000000000..c77453b900a3 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/firmware.h> | ||
48 | #include <linux/pci.h> | ||
49 | #include "adf_cfg.h" | ||
50 | #include "adf_accel_devices.h" | ||
51 | #include "adf_common_drv.h" | ||
52 | #include "icp_qat_uclo.h" | ||
53 | |||
54 | int adf_ae_fw_load(struct adf_accel_dev *accel_dev) | ||
55 | { | ||
56 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
57 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
58 | void *uof_addr; | ||
59 | uint32_t uof_size; | ||
60 | |||
61 | if (request_firmware(&loader_data->uof_fw, hw_device->fw_name, | ||
62 | &accel_dev->accel_pci_dev.pci_dev->dev)) { | ||
63 | pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name); | ||
64 | return -EFAULT; | ||
65 | } | ||
66 | |||
67 | uof_size = loader_data->uof_fw->size; | ||
68 | uof_addr = (void *)loader_data->uof_fw->data; | ||
69 | if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { | ||
70 | pr_err("QAT: Failed to map UOF\n"); | ||
71 | goto out_err; | ||
72 | } | ||
73 | if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { | ||
74 | pr_err("QAT: Failed to map UOF\n"); | ||
75 | goto out_err; | ||
76 | } | ||
77 | return 0; | ||
78 | |||
79 | out_err: | ||
80 | release_firmware(loader_data->uof_fw); | ||
81 | return -EFAULT; | ||
82 | } | ||
83 | |||
84 | int adf_ae_fw_release(struct adf_accel_dev *accel_dev) | ||
85 | { | ||
86 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
87 | |||
88 | release_firmware(loader_data->uof_fw); | ||
89 | qat_uclo_del_uof_obj(loader_data->fw_loader); | ||
90 | qat_hal_deinit(loader_data->fw_loader); | ||
91 | loader_data->fw_loader = NULL; | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | int adf_ae_start(struct adf_accel_dev *accel_dev) | ||
96 | { | ||
97 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
98 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
99 | uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); | ||
100 | |||
101 | for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { | ||
102 | if (hw_data->ae_mask & (1 << ae)) { | ||
103 | qat_hal_start(loader_data->fw_loader, ae, 0xFF); | ||
104 | ae_ctr++; | ||
105 | } | ||
106 | } | ||
107 | pr_info("QAT: qat_dev%d started %d acceleration engines\n", | ||
108 | accel_dev->accel_id, ae_ctr); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | int adf_ae_stop(struct adf_accel_dev *accel_dev) | ||
113 | { | ||
114 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
115 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
116 | uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); | ||
117 | |||
118 | for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { | ||
119 | if (hw_data->ae_mask & (1 << ae)) { | ||
120 | qat_hal_stop(loader_data->fw_loader, ae, 0xFF); | ||
121 | ae_ctr++; | ||
122 | } | ||
123 | } | ||
124 | pr_info("QAT: qat_dev%d stopped %d acceleration engines\n", | ||
125 | accel_dev->accel_id, ae_ctr); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae) | ||
130 | { | ||
131 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | ||
132 | |||
133 | qat_hal_reset(loader_data->fw_loader); | ||
134 | if (qat_hal_clr_reset(loader_data->fw_loader)) | ||
135 | return -EFAULT; | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | int adf_ae_init(struct adf_accel_dev *accel_dev) | ||
141 | { | ||
142 | struct adf_fw_loader_data *loader_data; | ||
143 | |||
144 | loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL); | ||
145 | if (!loader_data) | ||
146 | return -ENOMEM; | ||
147 | |||
148 | accel_dev->fw_loader = loader_data; | ||
149 | if (qat_hal_init(accel_dev)) { | ||
150 | pr_err("QAT: Failed to init the AEs\n"); | ||
151 | kfree(loader_data); | ||
152 | return -EFAULT; | ||
153 | } | ||
154 | if (adf_ae_reset(accel_dev, 0)) { | ||
155 | pr_err("QAT: Failed to reset the AEs\n"); | ||
156 | qat_hal_deinit(loader_data->fw_loader); | ||
157 | kfree(loader_data); | ||
158 | return -EFAULT; | ||
159 | } | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | int adf_ae_shutdown(struct adf_accel_dev *accel_dev) | ||
164 | { | ||
165 | kfree(accel_dev->fw_loader); | ||
166 | accel_dev->fw_loader = NULL; | ||
167 | return 0; | ||
168 | } | ||
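
Together these routines define the accel-engine lifecycle: init allocates the loader and resets the AEs, fw_load maps the UOF image, start/stop toggle the engines, and shutdown frees the loader. A hedged sketch of the bring-up order a device start path would follow, with error unwinding abbreviated:

    /* Sketch: expected call order during device start. */
    static int example_ae_bringup(struct adf_accel_dev *accel_dev)
    {
            int ret;

            ret = adf_ae_init(accel_dev);    /* alloc loader, reset AEs */
            if (ret)
                    return ret;
            ret = adf_ae_fw_load(accel_dev); /* request + map UOF image */
            if (ret)
                    goto err_shutdown;
            return adf_ae_start(accel_dev);  /* kick the engines */

    err_shutdown:
            adf_ae_shutdown(accel_dev);
            return ret;
    }
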
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c new file mode 100644 index 000000000000..c29d4c3926bf --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_aer.c | |||
@@ -0,0 +1,259 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/pci.h> | ||
49 | #include <linux/aer.h> | ||
50 | #include <linux/completion.h> | ||
51 | #include <linux/workqueue.h> | ||
52 | #include <linux/delay.h> | ||
53 | #include "adf_accel_devices.h" | ||
54 | #include "adf_common_drv.h" | ||
55 | |||
56 | static struct workqueue_struct *device_reset_wq; | ||
57 | |||
58 | static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, | ||
59 | pci_channel_state_t state) | ||
60 | { | ||
61 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
62 | |||
63 | pr_info("QAT: Acceleration driver hardware error detected.\n"); | ||
64 | if (!accel_dev) { | ||
65 | pr_err("QAT: Can't find acceleration device\n"); | ||
66 | return PCI_ERS_RESULT_DISCONNECT; | ||
67 | } | ||
68 | |||
69 | if (state == pci_channel_io_perm_failure) { | ||
70 | pr_err("QAT: Can't recover from device error\n"); | ||
71 | return PCI_ERS_RESULT_DISCONNECT; | ||
72 | } | ||
73 | |||
74 | return PCI_ERS_RESULT_NEED_RESET; | ||
75 | } | ||
76 | |||
77 | /* reset dev data */ | ||
78 | struct adf_reset_dev_data { | ||
79 | int mode; | ||
80 | struct adf_accel_dev *accel_dev; | ||
81 | struct completion compl; | ||
82 | struct work_struct reset_work; | ||
83 | }; | ||
84 | |||
85 | #define PPDSTAT_OFFSET 0x7E | ||
86 | static void adf_dev_restore(struct adf_accel_dev *accel_dev) | ||
87 | { | ||
88 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | ||
89 | struct pci_dev *parent = pdev->bus->self; | ||
90 | uint16_t ppdstat = 0, bridge_ctl = 0; | ||
91 | int pending = 0; | ||
92 | |||
93 | pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id); | ||
94 | pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); | ||
95 | pending = ppdstat & PCI_EXP_DEVSTA_TRPND; | ||
96 | if (pending) { | ||
97 | int ctr = 0; | ||
98 | |||
99 | do { | ||
100 | msleep(100); | ||
101 | pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); | ||
102 | pending = ppdstat & PCI_EXP_DEVSTA_TRPND; | ||
103 | } while (pending && ctr++ < 10); | ||
104 | } | ||
105 | |||
106 | if (pending) | ||
107 | pr_info("QAT: Transaction still in progress. Proceeding\n"); | ||
108 | |||
109 | pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl); | ||
110 | bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
111 | pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); | ||
112 | msleep(100); | ||
113 | bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
114 | pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); | ||
115 | msleep(100); | ||
116 | pci_restore_state(pdev); | ||
117 | pci_save_state(pdev); | ||
118 | } | ||
119 | |||
120 | static void adf_device_reset_worker(struct work_struct *work) | ||
121 | { | ||
122 | struct adf_reset_dev_data *reset_data = | ||
123 | container_of(work, struct adf_reset_dev_data, reset_work); | ||
124 | struct adf_accel_dev *accel_dev = reset_data->accel_dev; | ||
125 | |||
126 | adf_dev_restarting_notify(accel_dev); | ||
127 | adf_dev_stop(accel_dev); | ||
128 | adf_dev_restore(accel_dev); | ||
129 | if (adf_dev_start(accel_dev)) { | ||
130 | /* The device hung and we can't restart it so stop here */ | ||
131 | dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); | ||
132 | kfree(reset_data); | ||
133 | WARN(1, "QAT: device restart failed. Device is unusable\n"); | ||
134 | return; | ||
135 | } | ||
136 | adf_dev_restarted_notify(accel_dev); | ||
137 | clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); | ||
138 | |||
139 | /* The dev is back alive. Notify the caller if in sync mode */ | ||
140 | if (reset_data->mode == ADF_DEV_RESET_SYNC) | ||
141 | complete(&reset_data->compl); | ||
142 | else | ||
143 | kfree(reset_data); | ||
144 | } | ||
145 | |||
146 | static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, | ||
147 | enum adf_dev_reset_mode mode) | ||
148 | { | ||
149 | struct adf_reset_dev_data *reset_data; | ||
150 | |||
151 | if (adf_dev_started(accel_dev) && | ||
152 | !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) | ||
153 | return 0; | ||
154 | |||
155 | set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); | ||
156 | reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC); | ||
157 | if (!reset_data) | ||
158 | return -ENOMEM; | ||
159 | reset_data->accel_dev = accel_dev; | ||
160 | init_completion(&reset_data->compl); | ||
161 | reset_data->mode = mode; | ||
162 | INIT_WORK(&reset_data->reset_work, adf_device_reset_worker); | ||
163 | queue_work(device_reset_wq, &reset_data->reset_work); | ||
164 | |||
165 | /* If in sync mode wait for the result */ | ||
166 | if (mode == ADF_DEV_RESET_SYNC) { | ||
167 | int ret = 0; | ||
168 | /* Maximum device reset time is 10 seconds */ | ||
169 | unsigned long wait_jiffies = msecs_to_jiffies(10000); | ||
170 | unsigned long timeout = wait_for_completion_timeout( | ||
171 | &reset_data->compl, wait_jiffies); | ||
172 | if (!timeout) { | ||
173 | pr_err("QAT: Reset device timeout expired\n"); | ||
174 | ret = -EFAULT; | ||
175 | } | ||
176 | kfree(reset_data); | ||
177 | return ret; | ||
178 | } | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev) | ||
183 | { | ||
184 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
185 | |||
186 | if (!accel_dev) { | ||
187 | pr_err("QAT: Can't find acceleration device\n"); | ||
188 | return PCI_ERS_RESULT_DISCONNECT; | ||
189 | } | ||
190 | pci_cleanup_aer_uncorrect_error_status(pdev); | ||
191 | if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC)) | ||
192 | return PCI_ERS_RESULT_DISCONNECT; | ||
193 | |||
194 | return PCI_ERS_RESULT_RECOVERED; | ||
195 | } | ||
196 | |||
197 | static void adf_resume(struct pci_dev *pdev) | ||
198 | { | ||
199 | pr_info("QAT: Acceleration driver reset completed\n"); | ||
200 | pr_info("QAT: Device is up and runnig\n"); | ||
201 | } | ||
202 | |||
203 | static struct pci_error_handlers adf_err_handler = { | ||
204 | .error_detected = adf_error_detected, | ||
205 | .slot_reset = adf_slot_reset, | ||
206 | .resume = adf_resume, | ||
207 | }; | ||
208 | |||
209 | /** | ||
210 | * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device | ||
211 | * @accel_dev: Pointer to acceleration device. | ||
212 | * @adf: PCI device driver owning the given acceleration device. | ||
213 | * | ||
214 | * Function enables PCI Advanced Error Reporting for the | ||
215 | * QAT acceleration device accel_dev. | ||
216 | * To be used by QAT device specific drivers. | ||
217 | * | ||
218 | * Return: 0 on success, error code otherwise. | ||
219 | */ | ||
220 | int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf) | ||
221 | { | ||
222 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | ||
223 | |||
224 | adf->err_handler = &adf_err_handler; | ||
225 | pci_enable_pcie_error_reporting(pdev); | ||
226 | return 0; | ||
227 | } | ||
228 | EXPORT_SYMBOL_GPL(adf_enable_aer); | ||
229 | |||
230 | /** | ||
231 | * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device | ||
232 | * @accel_dev: Pointer to acceleration device. | ||
233 | * | ||
234 | * Function disables PCI Advanced Error Reporting for the | ||
235 | * QAT acceleration device accel_dev. | ||
236 | * To be used by QAT device specific drivers. | ||
237 | * | ||
238 | * Return: void | ||
239 | */ | ||
240 | void adf_disable_aer(struct adf_accel_dev *accel_dev) | ||
241 | { | ||
242 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | ||
243 | |||
244 | pci_disable_pcie_error_reporting(pdev); | ||
245 | } | ||
246 | EXPORT_SYMBOL_GPL(adf_disable_aer); | ||
247 | |||
248 | int adf_init_aer(void) | ||
249 | { | ||
250 | device_reset_wq = create_workqueue("qat_device_reset_wq"); | ||
251 | return (device_reset_wq == NULL) ? -EFAULT : 0; | ||
252 | } | ||
253 | |||
254 | void adf_exit_aer(void) | ||
255 | { | ||
256 | if (device_reset_wq) | ||
257 | destroy_workqueue(device_reset_wq); | ||
258 | device_reset_wq = NULL; | ||
259 | } | ||
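
adf_enable_aer() stores adf_err_handler in the caller's pci_driver, so a device-specific driver only needs one call during probe and one during remove. A hedged sketch; the driver struct name is illustrative:

    static struct pci_driver adf_example_driver; /* illustrative name */

    static int example_probe_tail(struct adf_accel_dev *accel_dev)
    {
            /* Installs the handlers above and enables PCIe AER. */
            return adf_enable_aer(accel_dev, &adf_example_driver);
    }

    static void example_remove_head(struct adf_accel_dev *accel_dev)
    {
            adf_disable_aer(accel_dev);
    }
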
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c new file mode 100644 index 000000000000..aba7f1d043fb --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/list.h> | ||
50 | #include <linux/seq_file.h> | ||
51 | #include "adf_accel_devices.h" | ||
52 | #include "adf_cfg.h" | ||
53 | |||
54 | static DEFINE_MUTEX(qat_cfg_read_lock); | ||
55 | |||
56 | static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos) | ||
57 | { | ||
58 | struct adf_cfg_device_data *dev_cfg = sfile->private; | ||
59 | |||
60 | mutex_lock(&qat_cfg_read_lock); | ||
61 | return seq_list_start(&dev_cfg->sec_list, *pos); | ||
62 | } | ||
63 | |||
64 | static int qat_dev_cfg_show(struct seq_file *sfile, void *v) | ||
65 | { | ||
66 | struct list_head *list; | ||
67 | struct adf_cfg_section *sec = | ||
68 | list_entry(v, struct adf_cfg_section, list); | ||
69 | |||
70 | seq_printf(sfile, "[%s]\n", sec->name); | ||
71 | list_for_each(list, &sec->param_head) { | ||
72 | struct adf_cfg_key_val *ptr = | ||
73 | list_entry(list, struct adf_cfg_key_val, list); | ||
74 | seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val); | ||
75 | } | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos) | ||
80 | { | ||
81 | struct adf_cfg_device_data *dev_cfg = sfile->private; | ||
82 | |||
83 | return seq_list_next(v, &dev_cfg->sec_list, pos); | ||
84 | } | ||
85 | |||
86 | static void qat_dev_cfg_stop(struct seq_file *sfile, void *v) | ||
87 | { | ||
88 | mutex_unlock(&qat_cfg_read_lock); | ||
89 | } | ||
90 | |||
91 | static const struct seq_operations qat_dev_cfg_sops = { | ||
92 | .start = qat_dev_cfg_start, | ||
93 | .next = qat_dev_cfg_next, | ||
94 | .stop = qat_dev_cfg_stop, | ||
95 | .show = qat_dev_cfg_show | ||
96 | }; | ||
97 | |||
98 | static int qat_dev_cfg_open(struct inode *inode, struct file *file) | ||
99 | { | ||
100 | int ret = seq_open(file, &qat_dev_cfg_sops); | ||
101 | |||
102 | if (!ret) { | ||
103 | struct seq_file *seq_f = file->private_data; | ||
104 | |||
105 | seq_f->private = inode->i_private; | ||
106 | } | ||
107 | return ret; | ||
108 | } | ||
109 | |||
110 | static const struct file_operations qat_dev_cfg_fops = { | ||
111 | .open = qat_dev_cfg_open, | ||
112 | .read = seq_read, | ||
113 | .llseek = seq_lseek, | ||
114 | .release = seq_release | ||
115 | }; | ||
116 | |||
117 | /** | ||
118 | * adf_cfg_dev_add() - Create an acceleration device configuration table. | ||
119 | * @accel_dev: Pointer to acceleration device. | ||
120 | * | ||
121 | * Function creates a configuration table for the given acceleration device. | ||
122 | * The table stores device specific config values. | ||
123 | * To be used by QAT device specific drivers. | ||
124 | * | ||
125 | * Return: 0 on success, error code otherwise. | ||
126 | */ | ||
127 | int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) | ||
128 | { | ||
129 | struct adf_cfg_device_data *dev_cfg_data; | ||
130 | |||
131 | dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL); | ||
132 | if (!dev_cfg_data) | ||
133 | return -ENOMEM; | ||
134 | INIT_LIST_HEAD(&dev_cfg_data->sec_list); | ||
135 | init_rwsem(&dev_cfg_data->lock); | ||
136 | accel_dev->cfg = dev_cfg_data; | ||
137 | |||
138 | /* accel_dev->debugfs_dir should always be non-NULL here */ | ||
139 | dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR, | ||
140 | accel_dev->debugfs_dir, | ||
141 | dev_cfg_data, | ||
142 | &qat_dev_cfg_fops); | ||
143 | if (!dev_cfg_data->debug) { | ||
144 | pr_err("QAT: Failed to create qat cfg debugfs entry.\n"); | ||
145 | kfree(dev_cfg_data); | ||
146 | accel_dev->cfg = NULL; | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | return 0; | ||
150 | } | ||
151 | EXPORT_SYMBOL_GPL(adf_cfg_dev_add); | ||
152 | |||
153 | static void adf_cfg_section_del_all(struct list_head *head); | ||
154 | |||
155 | void adf_cfg_del_all(struct adf_accel_dev *accel_dev) | ||
156 | { | ||
157 | struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; | ||
158 | |||
159 | down_write(&dev_cfg_data->lock); | ||
160 | adf_cfg_section_del_all(&dev_cfg_data->sec_list); | ||
161 | up_write(&dev_cfg_data->lock); | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * adf_cfg_dev_remove() - Clears acceleration device configuration table. | ||
166 | * @accel_dev: Pointer to acceleration device. | ||
167 | * | ||
168 | * Function removes configuration table from the given acceleration device | ||
169 | * and frees all allocated memory. | ||
170 | * To be used by QAT device specific drivers. | ||
171 | * | ||
172 | * Return: void | ||
173 | */ | ||
174 | void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev) | ||
175 | { | ||
176 | struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; | ||
177 | |||
178 | down_write(&dev_cfg_data->lock); | ||
179 | adf_cfg_section_del_all(&dev_cfg_data->sec_list); | ||
180 | up_write(&dev_cfg_data->lock); | ||
181 | debugfs_remove(dev_cfg_data->debug); | ||
182 | kfree(dev_cfg_data); | ||
183 | accel_dev->cfg = NULL; | ||
184 | } | ||
185 | EXPORT_SYMBOL_GPL(adf_cfg_dev_remove); | ||
186 | |||
187 | static void adf_cfg_keyval_add(struct adf_cfg_key_val *new, | ||
188 | struct adf_cfg_section *sec) | ||
189 | { | ||
190 | list_add_tail(&new->list, &sec->param_head); | ||
191 | } | ||
192 | |||
193 | static void adf_cfg_keyval_del_all(struct list_head *head) | ||
194 | { | ||
195 | struct list_head *list_ptr, *tmp; | ||
196 | |||
197 | list_for_each_prev_safe(list_ptr, tmp, head) { | ||
198 | struct adf_cfg_key_val *ptr = | ||
199 | list_entry(list_ptr, struct adf_cfg_key_val, list); | ||
200 | list_del(list_ptr); | ||
201 | kfree(ptr); | ||
202 | } | ||
203 | } | ||
204 | |||
205 | static void adf_cfg_section_del_all(struct list_head *head) | ||
206 | { | ||
207 | struct adf_cfg_section *ptr; | ||
208 | struct list_head *list, *tmp; | ||
209 | |||
210 | list_for_each_prev_safe(list, tmp, head) { | ||
211 | ptr = list_entry(list, struct adf_cfg_section, list); | ||
212 | adf_cfg_keyval_del_all(&ptr->param_head); | ||
213 | list_del(list); | ||
214 | kfree(ptr); | ||
215 | } | ||
216 | } | ||
217 | |||
218 | static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s, | ||
219 | const char *key) | ||
220 | { | ||
221 | struct list_head *list; | ||
222 | |||
223 | list_for_each(list, &s->param_head) { | ||
224 | struct adf_cfg_key_val *ptr = | ||
225 | list_entry(list, struct adf_cfg_key_val, list); | ||
226 | if (!strcmp(ptr->key, key)) | ||
227 | return ptr; | ||
228 | } | ||
229 | return NULL; | ||
230 | } | ||
231 | |||
232 | static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev, | ||
233 | const char *sec_name) | ||
234 | { | ||
235 | struct adf_cfg_device_data *cfg = accel_dev->cfg; | ||
236 | struct list_head *list; | ||
237 | |||
238 | list_for_each(list, &cfg->sec_list) { | ||
239 | struct adf_cfg_section *ptr = | ||
240 | list_entry(list, struct adf_cfg_section, list); | ||
241 | if (!strcmp(ptr->name, sec_name)) | ||
242 | return ptr; | ||
243 | } | ||
244 | return NULL; | ||
245 | } | ||
246 | |||
247 | static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev, | ||
248 | const char *sec_name, | ||
249 | const char *key_name, | ||
250 | char *val) | ||
251 | { | ||
252 | struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name); | ||
253 | struct adf_cfg_key_val *keyval = NULL; | ||
254 | |||
255 | if (sec) | ||
256 | keyval = adf_cfg_key_value_find(sec, key_name); | ||
257 | if (keyval) { | ||
258 | memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES); | ||
259 | return 0; | ||
260 | } | ||
261 | return -1; | ||
262 | } | ||
263 | |||
264 | /** | ||
265 | * adf_cfg_add_key_value_param() - Add key-value config entry to config table. | ||
266 | * @accel_dev: Pointer to acceleration device. | ||
267 | * @section_name: Name of the section where the param will be added | ||
268 | * @key: The key string | ||
269 | * @val: Value for the given @key | ||
270 | * @type: Type - string, int or address | ||
271 | * | ||
272 | * Function adds a configuration key-value entry to the appropriate section | ||
273 | * in the given acceleration device's config table. | ||
274 | * To be used by QAT device specific drivers. | ||
275 | * | ||
276 | * Return: 0 on success, error code otherwise. | ||
277 | */ | ||
278 | int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, | ||
279 | const char *section_name, | ||
280 | const char *key, const void *val, | ||
281 | enum adf_cfg_val_type type) | ||
282 | { | ||
283 | struct adf_cfg_device_data *cfg = accel_dev->cfg; | ||
284 | struct adf_cfg_key_val *key_val; | ||
285 | struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, | ||
286 | section_name); | ||
287 | if (!section) | ||
288 | return -EFAULT; | ||
289 | |||
290 | key_val = kzalloc(sizeof(*key_val), GFP_KERNEL); | ||
291 | if (!key_val) | ||
292 | return -ENOMEM; | ||
293 | |||
294 | INIT_LIST_HEAD(&key_val->list); | ||
295 | strlcpy(key_val->key, key, sizeof(key_val->key)); | ||
296 | |||
297 | if (type == ADF_DEC) { | ||
298 | snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, | ||
299 | "%ld", (*((long *)val))); | ||
300 | } else if (type == ADF_STR) { | ||
301 | strlcpy(key_val->val, (char *)val, sizeof(key_val->val)); | ||
302 | } else if (type == ADF_HEX) { | ||
303 | snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, | ||
304 | "0x%lx", (unsigned long)val); | ||
305 | } else { | ||
306 | pr_err("QAT: Unknown type given.\n"); | ||
307 | kfree(key_val); | ||
308 | return -1; | ||
309 | } | ||
310 | key_val->type = type; | ||
311 | down_write(&cfg->lock); | ||
312 | adf_cfg_keyval_add(key_val, section); | ||
313 | up_write(&cfg->lock); | ||
314 | return 0; | ||
315 | } | ||
316 | EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param); | ||
317 | |||
318 | /** | ||
319 | * adf_cfg_section_add() - Add config section entry to config table. | ||
320 | * @accel_dev: Pointer to acceleration device. | ||
321 | * @name: Name of the section | ||
322 | * | ||
323 | * Function adds configuration section where key - value entries | ||
324 | * will be stored. | ||
325 | * To be used by QAT device specific drivers. | ||
326 | * | ||
327 | * Return: 0 on success, error code otherwise. | ||
328 | */ | ||
329 | int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) | ||
330 | { | ||
331 | struct adf_cfg_device_data *cfg = accel_dev->cfg; | ||
332 | struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name); | ||
333 | |||
334 | if (sec) | ||
335 | return 0; | ||
336 | |||
337 | sec = kzalloc(sizeof(*sec), GFP_KERNEL); | ||
338 | if (!sec) | ||
339 | return -ENOMEM; | ||
340 | |||
341 | strlcpy(sec->name, name, sizeof(sec->name)); | ||
342 | INIT_LIST_HEAD(&sec->param_head); | ||
343 | down_write(&cfg->lock); | ||
344 | list_add_tail(&sec->list, &cfg->sec_list); | ||
345 | up_write(&cfg->lock); | ||
346 | return 0; | ||
347 | } | ||
348 | EXPORT_SYMBOL_GPL(adf_cfg_section_add); | ||
349 | |||
350 | int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, | ||
351 | const char *section, const char *name, | ||
352 | char *value) | ||
353 | { | ||
354 | struct adf_cfg_device_data *cfg = accel_dev->cfg; | ||
355 | int ret; | ||
356 | |||
357 | down_read(&cfg->lock); | ||
358 | ret = adf_cfg_key_val_get(accel_dev, section, name, value); | ||
359 | up_read(&cfg->lock); | ||
360 | return ret; | ||
361 | } | ||
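
The exported helpers above amount to a small string-keyed store per device. A hedged round-trip sketch, assuming adf_cfg_dev_add() has already run for accel_dev; the section and key names are placeholders:

    static int example_cfg_roundtrip(struct adf_accel_dev *accel_dev)
    {
            char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
            long banks = 8;
            int ret;

            ret = adf_cfg_section_add(accel_dev, "GENERAL");
            if (ret)
                    return ret;
            ret = adf_cfg_add_key_value_param(accel_dev, "GENERAL",
                                              "NumBanks", &banks, ADF_DEC);
            if (ret)
                    return ret;
            /* On success copies the string "8" into val. */
            return adf_cfg_get_param_value(accel_dev, "GENERAL",
                                           "NumBanks", val);
    }
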
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h new file mode 100644 index 000000000000..6a9c6f6b5ec9 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg.h | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_CFG_H_ | ||
48 | #define ADF_CFG_H_ | ||
49 | |||
50 | #include <linux/list.h> | ||
51 | #include <linux/rwsem.h> | ||
52 | #include <linux/debugfs.h> | ||
53 | #include "adf_accel_devices.h" | ||
54 | #include "adf_cfg_common.h" | ||
55 | #include "adf_cfg_strings.h" | ||
56 | |||
57 | struct adf_cfg_key_val { | ||
58 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
59 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
60 | enum adf_cfg_val_type type; | ||
61 | struct list_head list; | ||
62 | }; | ||
63 | |||
64 | struct adf_cfg_section { | ||
65 | char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; | ||
66 | struct list_head list; | ||
67 | struct list_head param_head; | ||
68 | }; | ||
69 | |||
70 | struct adf_cfg_device_data { | ||
71 | struct list_head sec_list; | ||
72 | struct dentry *debug; | ||
73 | struct rw_semaphore lock; | ||
74 | }; | ||
75 | |||
76 | int adf_cfg_dev_add(struct adf_accel_dev *accel_dev); | ||
77 | void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev); | ||
78 | int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name); | ||
79 | void adf_cfg_del_all(struct adf_accel_dev *accel_dev); | ||
80 | int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, | ||
81 | const char *section_name, | ||
82 | const char *key, const void *val, | ||
83 | enum adf_cfg_val_type type); | ||
84 | int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev, | ||
85 | const char *section, const char *name, char *value); | ||
86 | |||
87 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h new file mode 100644 index 000000000000..88b82187ac35 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_CFG_COMMON_H_ | ||
48 | #define ADF_CFG_COMMON_H_ | ||
49 | |||
50 | #include <linux/types.h> | ||
51 | #include <linux/ioctl.h> | ||
52 | |||
53 | #define ADF_CFG_MAX_STR_LEN 64 | ||
54 | #define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN | ||
55 | #define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN | ||
56 | #define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN | ||
57 | #define ADF_CFG_BASE_DEC 10 | ||
58 | #define ADF_CFG_BASE_HEX 16 | ||
59 | #define ADF_CFG_ALL_DEVICES 0xFE | ||
60 | #define ADF_CFG_NO_DEVICE 0xFF | ||
61 | #define ADF_CFG_AFFINITY_WHATEVER 0xFF | ||
62 | #define MAX_DEVICE_NAME_SIZE 32 | ||
63 | #define ADF_MAX_DEVICES 32 | ||
64 | |||
65 | enum adf_cfg_val_type { | ||
66 | ADF_DEC, | ||
67 | ADF_HEX, | ||
68 | ADF_STR | ||
69 | }; | ||
70 | |||
71 | enum adf_device_type { | ||
72 | DEV_UNKNOWN = 0, | ||
73 | DEV_DH895XCC, | ||
74 | }; | ||
75 | |||
76 | struct adf_dev_status_info { | ||
77 | enum adf_device_type type; | ||
78 | uint8_t accel_id; | ||
79 | uint8_t instance_id; | ||
80 | uint8_t num_ae; | ||
81 | uint8_t num_accel; | ||
82 | uint8_t num_logical_accel; | ||
83 | uint8_t banks_per_accel; | ||
84 | uint8_t state; | ||
85 | uint8_t bus; | ||
86 | uint8_t dev; | ||
87 | uint8_t fun; | ||
88 | char name[MAX_DEVICE_NAME_SIZE]; | ||
89 | }; | ||
90 | |||
91 | #define ADF_CTL_IOC_MAGIC 'a' | ||
92 | #define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \ | ||
93 | struct adf_user_cfg_ctl_data) | ||
94 | #define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \ | ||
95 | struct adf_user_cfg_ctl_data) | ||
96 | #define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \ | ||
97 | struct adf_user_cfg_ctl_data) | ||
98 | #define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t) | ||
99 | #define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t) | ||
100 | #endif | ||
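These four commands, keyed off ADF_CTL_IOC_MAGIC, are the entire user-space control surface; adf_ctl_drv.c below implements them. A minimal user-space probe might look as follows. This is a hedged sketch: the /dev/qat_adf_ctl path is an assumption derived from the DEVICE_NAME the control driver registers, and is not guaranteed by this header.

	/* User-space sketch, not part of the driver. */
	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "adf_cfg_common.h"

	int main(void)
	{
		uint32_t num_devices = 0;
		int fd = open("/dev/qat_adf_ctl", O_RDWR);

		if (fd < 0)
			return 1;
		/* The driver fills the count in via copy_to_user(). */
		if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices) == 0)
			printf("%u acceleration device(s)\n", num_devices);
		close(fd);
		return 0;
	}
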
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h new file mode 100644 index 000000000000..c7ac758ebc90 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_CFG_STRINGS_H_ | ||
48 | #define ADF_CFG_STRINGS_H_ | ||
49 | |||
50 | #define ADF_GENERAL_SEC "GENERAL" | ||
51 | #define ADF_KERNEL_SEC "KERNEL" | ||
52 | #define ADF_ACCEL_SEC "Accelerator" | ||
53 | #define ADF_NUM_CY "NumberCyInstances" | ||
54 | #define ADF_NUM_DC "NumberDcInstances" | ||
55 | #define ADF_RING_SYM_SIZE "NumConcurrentSymRequests" | ||
56 | #define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests" | ||
57 | #define ADF_RING_DC_SIZE "NumConcurrentRequests" | ||
58 | #define ADF_RING_ASYM_TX "RingAsymTx" | ||
59 | #define ADF_RING_SYM_TX "RingSymTx" | ||
60 | #define ADF_RING_RND_TX "RingNrbgTx" | ||
61 | #define ADF_RING_ASYM_RX "RingAsymRx" | ||
62 | #define ADF_RING_SYM_RX "RingSymRx" | ||
63 | #define ADF_RING_RND_RX "RingNrbgRx" | ||
64 | #define ADF_RING_DC_TX "RingTx" | ||
65 | #define ADF_RING_DC_RX "RingRx" | ||
66 | #define ADF_ETRMGR_BANK "Bank" | ||
67 | #define ADF_RING_BANK_NUM "BankNumber" | ||
68 | #define ADF_CY "Cy" | ||
69 | #define ADF_DC "Dc" | ||
70 | #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" | ||
71 | #define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ | ||
72 | ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED | ||
73 | #define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs" | ||
74 | #define ADF_ETRMGR_COALESCE_TIMER_FORMAT \ | ||
75 | ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER | ||
76 | #define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses" | ||
77 | #define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \ | ||
78 | ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED | ||
79 | #define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity" | ||
80 | #define ADF_ETRMGR_CORE_AFFINITY_FORMAT \ | ||
81 | ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY | ||
82 | #define ADF_ACCEL_STR "Accelerator%d" | ||
83 | #endif | ||
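The *_FORMAT macros rely on C string-literal concatenation: ADF_ETRMGR_CORE_AFFINITY_FORMAT, for example, expands to "Bank%dCoreAffinity" and is meant to be handed straight to a printf-style formatter. A small illustrative helper (the function name is ours, not the driver's):

	#include <stdio.h>
	#include "adf_cfg_strings.h"

	/* Illustrative only: render the per-bank core-affinity key.
	 * For bank 0 this produces "Bank0CoreAffinity". */
	static void example_bank_key(char *key, size_t len, int bank)
	{
		snprintf(key, len, ADF_ETRMGR_CORE_AFFINITY_FORMAT, bank);
	}
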
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h new file mode 100644 index 000000000000..0c38a155a865 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_CFG_USER_H_ | ||
48 | #define ADF_CFG_USER_H_ | ||
49 | |||
50 | #include "adf_cfg_common.h" | ||
51 | #include "adf_cfg_strings.h" | ||
52 | |||
53 | struct adf_user_cfg_key_val { | ||
54 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
55 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
56 | union { | ||
57 | char *user_val_ptr; | ||
58 | uint64_t padding1; | ||
59 | }; | ||
60 | union { | ||
61 | struct adf_user_cfg_key_val *prev; | ||
62 | uint64_t padding2; | ||
63 | }; | ||
64 | union { | ||
65 | struct adf_user_cfg_key_val *next; | ||
66 | uint64_t padding3; | ||
67 | }; | ||
68 | enum adf_cfg_val_type type; | ||
69 | }; | ||
70 | |||
71 | struct adf_user_cfg_section { | ||
72 | char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; | ||
73 | union { | ||
74 | struct adf_user_cfg_key_val *params; | ||
75 | uint64_t padding1; | ||
76 | }; | ||
77 | union { | ||
78 | struct adf_user_cfg_section *prev; | ||
79 | uint64_t padding2; | ||
80 | }; | ||
81 | union { | ||
82 | struct adf_user_cfg_section *next; | ||
83 | uint64_t padding3; | ||
84 | }; | ||
85 | }; | ||
86 | |||
87 | struct adf_user_cfg_ctl_data { | ||
88 | union { | ||
89 | struct adf_user_cfg_section *config_section; | ||
90 | uint64_t padding; | ||
91 | }; | ||
92 | uint8_t device_id; | ||
93 | }; | ||
94 | #endif | ||
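These structures mirror the kernel-side ones in adf_cfg.h, but with every pointer wrapped in a union against a uint64_t so the layout is identical for 32-bit and 64-bit user space (which is also why the control driver can route compat_ioctl through the same handler). User space chains sections and key/value pairs through the next pointers and passes the head to IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS. A hedged sketch with made-up values:

	#include <string.h>
	#include "adf_cfg_user.h"

	/* Hypothetical single-section, single-key request; the key and
	 * section names come from adf_cfg_strings.h, the value "1" is an
	 * arbitrary example. Statics start zeroed, so unset pointers
	 * are NULL. */
	static struct adf_user_cfg_key_val kv;
	static struct adf_user_cfg_section sec;
	static struct adf_user_cfg_ctl_data ctl;

	static void example_build_cfg(void)
	{
		strncpy(kv.key, ADF_NUM_CY, sizeof(kv.key) - 1);
		strncpy(kv.val, "1", sizeof(kv.val) - 1);
		kv.type = ADF_DEC;
		strncpy(sec.name, ADF_KERNEL_SEC, sizeof(sec.name) - 1);
		sec.params = &kv;
		ctl.config_section = &sec;
		ctl.device_id = 0;
		/* &ctl is now ready to pass to the config ioctl */
	}
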
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h new file mode 100644 index 000000000000..5e8f9d431e5d --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_DRV_H | ||
48 | #define ADF_DRV_H | ||
49 | |||
50 | #include <linux/list.h> | ||
51 | #include <linux/pci.h> | ||
52 | #include "adf_accel_devices.h" | ||
53 | #include "icp_qat_fw_loader_handle.h" | ||
54 | #include "icp_qat_hal.h" | ||
55 | |||
56 | #define ADF_STATUS_RESTARTING 0 | ||
57 | #define ADF_STATUS_STARTING 1 | ||
58 | #define ADF_STATUS_CONFIGURED 2 | ||
59 | #define ADF_STATUS_STARTED 3 | ||
60 | #define ADF_STATUS_AE_INITIALISED 4 | ||
61 | #define ADF_STATUS_AE_UCODE_LOADED 5 | ||
62 | #define ADF_STATUS_AE_STARTED 6 | ||
63 | #define ADF_STATUS_ORPHAN_TH_RUNNING 7 | ||
64 | #define ADF_STATUS_IRQ_ALLOCATED 8 | ||
65 | |||
66 | enum adf_dev_reset_mode { | ||
67 | ADF_DEV_RESET_ASYNC = 0, | ||
68 | ADF_DEV_RESET_SYNC | ||
69 | }; | ||
70 | |||
71 | enum adf_event { | ||
72 | ADF_EVENT_INIT = 0, | ||
73 | ADF_EVENT_START, | ||
74 | ADF_EVENT_STOP, | ||
75 | ADF_EVENT_SHUTDOWN, | ||
76 | ADF_EVENT_RESTARTING, | ||
77 | ADF_EVENT_RESTARTED, | ||
78 | }; | ||
79 | |||
80 | struct service_hndl { | ||
81 | int (*event_hld)(struct adf_accel_dev *accel_dev, | ||
82 | enum adf_event event); | ||
83 | unsigned long init_status; | ||
84 | unsigned long start_status; | ||
85 | char *name; | ||
86 | struct list_head list; | ||
87 | int admin; | ||
88 | }; | ||
89 | |||
90 | int adf_service_register(struct service_hndl *service); | ||
91 | int adf_service_unregister(struct service_hndl *service); | ||
92 | |||
93 | int adf_dev_init(struct adf_accel_dev *accel_dev); | ||
94 | int adf_dev_start(struct adf_accel_dev *accel_dev); | ||
95 | int adf_dev_stop(struct adf_accel_dev *accel_dev); | ||
96 | int adf_dev_shutdown(struct adf_accel_dev *accel_dev); | ||
97 | |||
98 | int adf_ctl_dev_register(void); | ||
99 | void adf_ctl_dev_unregister(void); | ||
100 | int adf_processes_dev_register(void); | ||
101 | void adf_processes_dev_unregister(void); | ||
102 | |||
103 | int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev); | ||
104 | void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev); | ||
105 | struct list_head *adf_devmgr_get_head(void); | ||
106 | struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); | ||
107 | struct adf_accel_dev *adf_devmgr_get_first(void); | ||
108 | struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev); | ||
109 | int adf_devmgr_verify_id(uint32_t id); | ||
110 | void adf_devmgr_get_num_dev(uint32_t *num); | ||
111 | int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); | ||
112 | int adf_dev_started(struct adf_accel_dev *accel_dev); | ||
113 | int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); | ||
114 | int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev); | ||
115 | int adf_ae_init(struct adf_accel_dev *accel_dev); | ||
116 | int adf_ae_shutdown(struct adf_accel_dev *accel_dev); | ||
117 | int adf_ae_fw_load(struct adf_accel_dev *accel_dev); | ||
118 | int adf_ae_fw_release(struct adf_accel_dev *accel_dev); | ||
119 | int adf_ae_start(struct adf_accel_dev *accel_dev); | ||
120 | int adf_ae_stop(struct adf_accel_dev *accel_dev); | ||
121 | |||
122 | int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); | ||
123 | void adf_disable_aer(struct adf_accel_dev *accel_dev); | ||
124 | int adf_init_aer(void); | ||
125 | void adf_exit_aer(void); | ||
126 | |||
127 | int adf_dev_get(struct adf_accel_dev *accel_dev); | ||
128 | void adf_dev_put(struct adf_accel_dev *accel_dev); | ||
129 | int adf_dev_in_use(struct adf_accel_dev *accel_dev); | ||
130 | int adf_init_etr_data(struct adf_accel_dev *accel_dev); | ||
131 | void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); | ||
132 | int qat_crypto_register(void); | ||
133 | int qat_crypto_unregister(void); | ||
134 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node); | ||
135 | void qat_crypto_put_instance(struct qat_crypto_instance *inst); | ||
136 | void qat_alg_callback(void *resp); | ||
137 | int qat_algs_init(void); | ||
138 | void qat_algs_exit(void); | ||
139 | int qat_algs_register(void); | ||
140 | int qat_algs_unregister(void); | ||
141 | |||
142 | int qat_hal_init(struct adf_accel_dev *accel_dev); | ||
143 | void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); | ||
144 | void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
145 | unsigned int ctx_mask); | ||
146 | void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
147 | unsigned int ctx_mask); | ||
148 | void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); | ||
149 | int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); | ||
150 | void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, | ||
151 | unsigned char ae, unsigned int ctx_mask); | ||
152 | int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, | ||
153 | unsigned char ae, enum icp_qat_uof_regtype lm_type, | ||
154 | unsigned char mode); | ||
155 | int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, | ||
156 | unsigned char ae, unsigned char mode); | ||
157 | int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, | ||
158 | unsigned char ae, unsigned char mode); | ||
159 | void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, | ||
160 | unsigned char ae, unsigned int ctx_mask, unsigned int upc); | ||
161 | void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, | ||
162 | unsigned char ae, unsigned int uaddr, | ||
163 | unsigned int words_num, uint64_t *uword); | ||
164 | void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
165 | unsigned int uword_addr, unsigned int words_num, | ||
166 | unsigned int *data); | ||
167 | int qat_hal_get_ins_num(void); | ||
168 | int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, | ||
169 | unsigned char ae, | ||
170 | struct icp_qat_uof_batch_init *lm_init_header); | ||
171 | int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, | ||
172 | unsigned char ae, unsigned char ctx_mask, | ||
173 | enum icp_qat_uof_regtype reg_type, | ||
174 | unsigned short reg_num, unsigned int regdata); | ||
175 | int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, | ||
176 | unsigned char ae, unsigned char ctx_mask, | ||
177 | enum icp_qat_uof_regtype reg_type, | ||
178 | unsigned short reg_num, unsigned int regdata); | ||
179 | int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, | ||
180 | unsigned char ae, unsigned char ctx_mask, | ||
181 | enum icp_qat_uof_regtype reg_type, | ||
182 | unsigned short reg_num, unsigned int regdata); | ||
183 | int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, | ||
184 | unsigned char ae, unsigned char ctx_mask, | ||
185 | unsigned short reg_num, unsigned int regdata); | ||
186 | int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, | ||
187 | unsigned char ae, unsigned short lm_addr, unsigned int value); | ||
188 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); | ||
189 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); | ||
190 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | ||
191 | void *addr_ptr, int mem_size); | ||
192 | #endif | ||
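Most of the prototypes above are self-describing, but the service_hndl/adf_event pair deserves one concrete illustration: a subservice registers a handler, and adf_init.c (below) drives it through the INIT/START/STOP/SHUTDOWN lifecycle, using init_status and start_status as per-device bitmaps. A minimal, purely illustrative registration sketch:

	#include "adf_accel_devices.h"
	#include "adf_common_drv.h"

	/* Not a real QAT service: name and no-op handling are placeholders. */
	static int example_event_handler(struct adf_accel_dev *accel_dev,
					 enum adf_event event)
	{
		/* A real service would allocate per-device state on
		 * ADF_EVENT_INIT and release it on ADF_EVENT_SHUTDOWN. */
		return 0;
	}

	static struct service_hndl example_service = {
		.event_hld = example_event_handler,
		.name = "example_service",
		.admin = 0,
	};

	/* Typically paired at module init/exit:
	 *	adf_service_register(&example_service);
	 *	adf_service_unregister(&example_service);
	 */
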
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c new file mode 100644 index 000000000000..d97069b8a8e4 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c | |||
@@ -0,0 +1,490 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/mutex.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/fs.h> | ||
51 | #include <linux/bitops.h> | ||
52 | #include <linux/pci.h> | ||
53 | #include <linux/cdev.h> | ||
54 | #include <linux/uaccess.h> | ||
55 | |||
56 | #include "adf_accel_devices.h" | ||
57 | #include "adf_common_drv.h" | ||
58 | #include "adf_cfg.h" | ||
59 | #include "adf_cfg_common.h" | ||
60 | #include "adf_cfg_user.h" | ||
61 | |||
62 | #define DEVICE_NAME "qat_adf_ctl" | ||
63 | |||
64 | static DEFINE_MUTEX(adf_ctl_lock); | ||
65 | static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); | ||
66 | |||
67 | static const struct file_operations adf_ctl_ops = { | ||
68 | .owner = THIS_MODULE, | ||
69 | .unlocked_ioctl = adf_ctl_ioctl, | ||
70 | .compat_ioctl = adf_ctl_ioctl, | ||
71 | }; | ||
72 | |||
73 | struct adf_ctl_drv_info { | ||
74 | unsigned int major; | ||
75 | struct cdev drv_cdev; | ||
76 | struct class *drv_class; | ||
77 | }; | ||
78 | |||
79 | static struct adf_ctl_drv_info adt_ctl_drv; | ||
80 | |||
81 | static void adf_chr_drv_destroy(void) | ||
82 | { | ||
83 | device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0)); | ||
84 | cdev_del(&adt_ctl_drv.drv_cdev); | ||
85 | class_destroy(adt_ctl_drv.drv_class); | ||
86 | unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1); | ||
87 | } | ||
88 | |||
89 | static int adf_chr_drv_create(void) | ||
90 | { | ||
91 | dev_t dev_id; | ||
92 | struct device *drv_device; | ||
93 | |||
94 | if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) { | ||
95 | pr_err("QAT: unable to allocate chrdev region\n"); | ||
96 | return -EFAULT; | ||
97 | } | ||
98 | |||
99 | adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME); | ||
100 | if (IS_ERR(adt_ctl_drv.drv_class)) { | ||
101 | pr_err("QAT: class_create failed for adf_ctl\n"); | ||
102 | goto err_chrdev_unreg; | ||
103 | } | ||
104 | adt_ctl_drv.major = MAJOR(dev_id); | ||
105 | cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops); | ||
106 | if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) { | ||
107 | pr_err("QAT: cdev add failed\n"); | ||
108 | goto err_class_destr; | ||
109 | } | ||
110 | |||
111 | drv_device = device_create(adt_ctl_drv.drv_class, NULL, | ||
112 | MKDEV(adt_ctl_drv.major, 0), | ||
113 | NULL, DEVICE_NAME); | ||
114 | 	if (IS_ERR(drv_device)) { | ||
115 | pr_err("QAT: failed to create device\n"); | ||
116 | goto err_cdev_del; | ||
117 | } | ||
118 | return 0; | ||
119 | err_cdev_del: | ||
120 | cdev_del(&adt_ctl_drv.drv_cdev); | ||
121 | err_class_destr: | ||
122 | class_destroy(adt_ctl_drv.drv_class); | ||
123 | err_chrdev_unreg: | ||
124 | unregister_chrdev_region(dev_id, 1); | ||
125 | return -EFAULT; | ||
126 | } | ||
127 | |||
128 | static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data, | ||
129 | unsigned long arg) | ||
130 | { | ||
131 | struct adf_user_cfg_ctl_data *cfg_data; | ||
132 | |||
133 | cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL); | ||
134 | if (!cfg_data) | ||
135 | return -ENOMEM; | ||
136 | |||
137 | /* Initialize device id to NO DEVICE as 0 is a valid device id */ | ||
138 | cfg_data->device_id = ADF_CFG_NO_DEVICE; | ||
139 | |||
140 | if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) { | ||
141 | pr_err("QAT: failed to copy from user cfg_data.\n"); | ||
142 | kfree(cfg_data); | ||
143 | return -EIO; | ||
144 | } | ||
145 | |||
146 | *ctl_data = cfg_data; | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int adf_add_key_value_data(struct adf_accel_dev *accel_dev, | ||
151 | const char *section, | ||
152 | const struct adf_user_cfg_key_val *key_val) | ||
153 | { | ||
154 | if (key_val->type == ADF_HEX) { | ||
155 | long *ptr = (long *)key_val->val; | ||
156 | long val = *ptr; | ||
157 | |||
158 | if (adf_cfg_add_key_value_param(accel_dev, section, | ||
159 | key_val->key, (void *)val, | ||
160 | key_val->type)) { | ||
161 | pr_err("QAT: failed to add keyvalue.\n"); | ||
162 | return -EFAULT; | ||
163 | } | ||
164 | } else { | ||
165 | if (adf_cfg_add_key_value_param(accel_dev, section, | ||
166 | key_val->key, key_val->val, | ||
167 | key_val->type)) { | ||
168 | pr_err("QAT: failed to add keyvalue.\n"); | ||
169 | return -EFAULT; | ||
170 | } | ||
171 | } | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, | ||
176 | struct adf_user_cfg_ctl_data *ctl_data) | ||
177 | { | ||
178 | struct adf_user_cfg_key_val key_val; | ||
179 | struct adf_user_cfg_key_val *params_head; | ||
180 | struct adf_user_cfg_section section, *section_head; | ||
181 | |||
182 | section_head = ctl_data->config_section; | ||
183 | |||
184 | while (section_head) { | ||
185 | if (copy_from_user(§ion, (void __user *)section_head, | ||
186 | sizeof(*section_head))) { | ||
187 | pr_err("QAT: failed to copy section info\n"); | ||
188 | goto out_err; | ||
189 | } | ||
190 | |||
191 | if (adf_cfg_section_add(accel_dev, section.name)) { | ||
192 | pr_err("QAT: failed to add section.\n"); | ||
193 | goto out_err; | ||
194 | } | ||
195 | |||
196 | params_head = section_head->params; | ||
197 | |||
198 | while (params_head) { | ||
199 | if (copy_from_user(&key_val, (void __user *)params_head, | ||
200 | sizeof(key_val))) { | ||
201 | pr_err("QAT: Failed to copy keyvalue.\n"); | ||
202 | goto out_err; | ||
203 | } | ||
204 | if (adf_add_key_value_data(accel_dev, section.name, | ||
205 | &key_val)) { | ||
206 | goto out_err; | ||
207 | } | ||
208 | params_head = key_val.next; | ||
209 | } | ||
210 | section_head = section.next; | ||
211 | } | ||
212 | return 0; | ||
213 | out_err: | ||
214 | adf_cfg_del_all(accel_dev); | ||
215 | return -EFAULT; | ||
216 | } | ||
217 | |||
218 | static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd, | ||
219 | unsigned long arg) | ||
220 | { | ||
221 | int ret; | ||
222 | struct adf_user_cfg_ctl_data *ctl_data; | ||
223 | struct adf_accel_dev *accel_dev; | ||
224 | |||
225 | ret = adf_ctl_alloc_resources(&ctl_data, arg); | ||
226 | if (ret) | ||
227 | return ret; | ||
228 | |||
229 | accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); | ||
230 | if (!accel_dev) { | ||
231 | ret = -EFAULT; | ||
232 | goto out; | ||
233 | } | ||
234 | |||
235 | if (adf_dev_started(accel_dev)) { | ||
236 | ret = -EFAULT; | ||
237 | goto out; | ||
238 | } | ||
239 | |||
240 | if (adf_copy_key_value_data(accel_dev, ctl_data)) { | ||
241 | ret = -EFAULT; | ||
242 | goto out; | ||
243 | } | ||
244 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
245 | out: | ||
246 | kfree(ctl_data); | ||
247 | return ret; | ||
248 | } | ||
249 | |||
250 | static int adf_ctl_is_device_in_use(int id) | ||
251 | { | ||
252 | struct list_head *itr, *head = adf_devmgr_get_head(); | ||
253 | |||
254 | list_for_each(itr, head) { | ||
255 | struct adf_accel_dev *dev = | ||
256 | list_entry(itr, struct adf_accel_dev, list); | ||
257 | |||
258 | if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { | ||
259 | if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { | ||
260 | pr_info("QAT: device qat_dev%d is busy\n", | ||
261 | dev->accel_id); | ||
262 | return -EBUSY; | ||
263 | } | ||
264 | } | ||
265 | } | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int adf_ctl_stop_devices(uint32_t id) | ||
270 | { | ||
271 | struct list_head *itr, *head = adf_devmgr_get_head(); | ||
272 | int ret = 0; | ||
273 | |||
274 | list_for_each(itr, head) { | ||
275 | struct adf_accel_dev *accel_dev = | ||
276 | list_entry(itr, struct adf_accel_dev, list); | ||
277 | if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { | ||
278 | if (!adf_dev_started(accel_dev)) | ||
279 | continue; | ||
280 | |||
281 | if (adf_dev_stop(accel_dev)) { | ||
282 | pr_err("QAT: Failed to stop qat_dev%d\n", id); | ||
283 | ret = -EFAULT; | ||
284 | } | ||
285 | } | ||
286 | } | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd, | ||
291 | unsigned long arg) | ||
292 | { | ||
293 | int ret; | ||
294 | struct adf_user_cfg_ctl_data *ctl_data; | ||
295 | |||
296 | ret = adf_ctl_alloc_resources(&ctl_data, arg); | ||
297 | if (ret) | ||
298 | return ret; | ||
299 | |||
300 | if (adf_devmgr_verify_id(ctl_data->device_id)) { | ||
301 | pr_err("QAT: Device %d not found\n", ctl_data->device_id); | ||
302 | ret = -ENODEV; | ||
303 | goto out; | ||
304 | } | ||
305 | |||
306 | ret = adf_ctl_is_device_in_use(ctl_data->device_id); | ||
307 | if (ret) | ||
308 | goto out; | ||
309 | |||
310 | if (ctl_data->device_id == ADF_CFG_ALL_DEVICES) | ||
311 | pr_info("QAT: Stopping all acceleration devices.\n"); | ||
312 | else | ||
313 | pr_info("QAT: Stopping acceleration device qat_dev%d.\n", | ||
314 | ctl_data->device_id); | ||
315 | |||
316 | ret = adf_ctl_stop_devices(ctl_data->device_id); | ||
317 | if (ret) | ||
318 | pr_err("QAT: failed to stop device.\n"); | ||
319 | out: | ||
320 | kfree(ctl_data); | ||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, | ||
325 | unsigned long arg) | ||
326 | { | ||
327 | int ret; | ||
328 | struct adf_user_cfg_ctl_data *ctl_data; | ||
329 | struct adf_accel_dev *accel_dev; | ||
330 | |||
331 | ret = adf_ctl_alloc_resources(&ctl_data, arg); | ||
332 | if (ret) | ||
333 | return ret; | ||
334 | |||
335 | accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); | ||
336 | if (!accel_dev) { | ||
337 | pr_err("QAT: Device %d not found\n", ctl_data->device_id); | ||
338 | ret = -ENODEV; | ||
339 | goto out; | ||
340 | } | ||
341 | |||
342 | if (!adf_dev_started(accel_dev)) { | ||
343 | pr_info("QAT: Starting acceleration device qat_dev%d.\n", | ||
344 | ctl_data->device_id); | ||
345 | ret = adf_dev_start(accel_dev); | ||
346 | } else { | ||
347 | pr_info("QAT: Acceleration device qat_dev%d already started.\n", | ||
348 | ctl_data->device_id); | ||
349 | } | ||
350 | if (ret) { | ||
351 | pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id); | ||
352 | adf_dev_stop(accel_dev); | ||
353 | } | ||
354 | out: | ||
355 | kfree(ctl_data); | ||
356 | return ret; | ||
357 | } | ||
358 | |||
359 | static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd, | ||
360 | unsigned long arg) | ||
361 | { | ||
362 | uint32_t num_devices = 0; | ||
363 | |||
364 | adf_devmgr_get_num_dev(&num_devices); | ||
365 | if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices))) | ||
366 | return -EFAULT; | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, | ||
372 | unsigned long arg) | ||
373 | { | ||
374 | struct adf_hw_device_data *hw_data; | ||
375 | struct adf_dev_status_info dev_info; | ||
376 | struct adf_accel_dev *accel_dev; | ||
377 | |||
378 | if (copy_from_user(&dev_info, (void __user *)arg, | ||
379 | sizeof(struct adf_dev_status_info))) { | ||
380 | pr_err("QAT: failed to copy from user.\n"); | ||
381 | return -EFAULT; | ||
382 | } | ||
383 | |||
384 | accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); | ||
385 | if (!accel_dev) { | ||
386 | pr_err("QAT: Device %d not found\n", dev_info.accel_id); | ||
387 | return -ENODEV; | ||
388 | } | ||
389 | hw_data = accel_dev->hw_device; | ||
390 | dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; | ||
391 | dev_info.num_ae = hw_data->get_num_aes(hw_data); | ||
392 | dev_info.num_accel = hw_data->get_num_accels(hw_data); | ||
393 | dev_info.num_logical_accel = hw_data->num_logical_accel; | ||
394 | dev_info.banks_per_accel = hw_data->num_banks | ||
395 | / hw_data->num_logical_accel; | ||
396 | strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); | ||
397 | dev_info.instance_id = hw_data->instance_id; | ||
398 | dev_info.type = hw_data->dev_class->type; | ||
399 | dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number; | ||
400 | dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn); | ||
401 | dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn); | ||
402 | |||
403 | if (copy_to_user((void __user *)arg, &dev_info, | ||
404 | sizeof(struct adf_dev_status_info))) { | ||
405 | pr_err("QAT: failed to copy status.\n"); | ||
406 | return -EFAULT; | ||
407 | } | ||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | ||
412 | { | ||
413 | int ret; | ||
414 | |||
415 | if (mutex_lock_interruptible(&adf_ctl_lock)) | ||
416 | return -EFAULT; | ||
417 | |||
418 | switch (cmd) { | ||
419 | case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS: | ||
420 | ret = adf_ctl_ioctl_dev_config(fp, cmd, arg); | ||
421 | break; | ||
422 | |||
423 | case IOCTL_STOP_ACCEL_DEV: | ||
424 | ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg); | ||
425 | break; | ||
426 | |||
427 | case IOCTL_START_ACCEL_DEV: | ||
428 | ret = adf_ctl_ioctl_dev_start(fp, cmd, arg); | ||
429 | break; | ||
430 | |||
431 | case IOCTL_GET_NUM_DEVICES: | ||
432 | ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg); | ||
433 | break; | ||
434 | |||
435 | case IOCTL_STATUS_ACCEL_DEV: | ||
436 | ret = adf_ctl_ioctl_get_status(fp, cmd, arg); | ||
437 | break; | ||
438 | default: | ||
439 | 		pr_err("QAT: Invalid ioctl\n"); | ||
440 | ret = -EFAULT; | ||
441 | break; | ||
442 | } | ||
443 | mutex_unlock(&adf_ctl_lock); | ||
444 | return ret; | ||
445 | } | ||
446 | |||
447 | static int __init adf_register_ctl_device_driver(void) | ||
448 | { | ||
449 | mutex_init(&adf_ctl_lock); | ||
450 | |||
451 | if (qat_algs_init()) | ||
452 | goto err_algs_init; | ||
453 | |||
454 | if (adf_chr_drv_create()) | ||
455 | goto err_chr_dev; | ||
456 | |||
457 | if (adf_init_aer()) | ||
458 | goto err_aer; | ||
459 | |||
460 | if (qat_crypto_register()) | ||
461 | goto err_crypto_register; | ||
462 | |||
463 | return 0; | ||
464 | |||
465 | err_crypto_register: | ||
466 | adf_exit_aer(); | ||
467 | err_aer: | ||
468 | adf_chr_drv_destroy(); | ||
469 | err_chr_dev: | ||
470 | qat_algs_exit(); | ||
471 | err_algs_init: | ||
472 | mutex_destroy(&adf_ctl_lock); | ||
473 | return -EFAULT; | ||
474 | } | ||
475 | |||
476 | static void __exit adf_unregister_ctl_device_driver(void) | ||
477 | { | ||
478 | adf_chr_drv_destroy(); | ||
479 | adf_exit_aer(); | ||
480 | qat_crypto_unregister(); | ||
481 | qat_algs_exit(); | ||
482 | mutex_destroy(&adf_ctl_lock); | ||
483 | } | ||
484 | |||
485 | module_init(adf_register_ctl_device_driver); | ||
486 | module_exit(adf_unregister_ctl_device_driver); | ||
487 | MODULE_LICENSE("Dual BSD/GPL"); | ||
488 | MODULE_AUTHOR("Intel"); | ||
489 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | ||
490 | MODULE_ALIAS("intel_qat"); | ||
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c new file mode 100644 index 000000000000..ae71555c0868 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/list.h> | ||
49 | #include "adf_cfg.h" | ||
50 | #include "adf_common_drv.h" | ||
51 | |||
52 | static LIST_HEAD(accel_table); | ||
53 | static DEFINE_MUTEX(table_lock); | ||
54 | static uint32_t num_devices; | ||
55 | |||
56 | /** | ||
57 | * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework | ||
58 | * @accel_dev: Pointer to acceleration device. | ||
59 | * | ||
60 | * Function adds acceleration device to the acceleration framework. | ||
61 | * To be used by QAT device specific drivers. | ||
62 | * | ||
63 | * Return: 0 on success, error code otherwise. | ||
64 | */ | ||
65 | int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) | ||
66 | { | ||
67 | 	struct list_head *itr; | ||
68 | |||
69 | 	mutex_lock(&table_lock); | ||
70 | 	if (num_devices == ADF_MAX_DEVICES) { | ||
71 | 		pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES); | ||
72 | 		mutex_unlock(&table_lock); | ||
73 | 		return -EFAULT; | ||
74 | 	} | ||
75 | list_for_each(itr, &accel_table) { | ||
76 | struct adf_accel_dev *ptr = | ||
77 | list_entry(itr, struct adf_accel_dev, list); | ||
78 | |||
79 | if (ptr == accel_dev) { | ||
80 | mutex_unlock(&table_lock); | ||
81 | return -EEXIST; | ||
82 | } | ||
83 | } | ||
84 | atomic_set(&accel_dev->ref_count, 0); | ||
85 | list_add_tail(&accel_dev->list, &accel_table); | ||
86 | accel_dev->accel_id = num_devices++; | ||
87 | mutex_unlock(&table_lock); | ||
88 | return 0; | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); | ||
91 | |||
92 | struct list_head *adf_devmgr_get_head(void) | ||
93 | { | ||
94 | return &accel_table; | ||
95 | } | ||
96 | |||
97 | /** | ||
98 | * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. | ||
99 | * @accel_dev: Pointer to acceleration device. | ||
100 | * | ||
101 | * Function removes acceleration device from the acceleration framework. | ||
102 | * To be used by QAT device specific drivers. | ||
103 | * | ||
104 | * Return: void | ||
105 | */ | ||
106 | void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev) | ||
107 | { | ||
108 | mutex_lock(&table_lock); | ||
109 | list_del(&accel_dev->list); | ||
110 | num_devices--; | ||
111 | mutex_unlock(&table_lock); | ||
112 | } | ||
113 | EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); | ||
114 | |||
115 | struct adf_accel_dev *adf_devmgr_get_first(void) | ||
116 | { | ||
117 | struct adf_accel_dev *dev = NULL; | ||
118 | |||
119 | if (!list_empty(&accel_table)) | ||
120 | dev = list_first_entry(&accel_table, struct adf_accel_dev, | ||
121 | list); | ||
122 | return dev; | ||
123 | } | ||
124 | |||
125 | /** | ||
126 | * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev. | ||
127 | * @pci_dev: Pointer to pci device. | ||
128 | * | ||
129 | * Function returns acceleration device associated with the given pci device. | ||
130 | * To be used by QAT device specific drivers. | ||
131 | * | ||
132 | * Return: pointer to accel_dev or NULL if not found. | ||
133 | */ | ||
134 | struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) | ||
135 | { | ||
136 | 	struct list_head *itr; | ||
137 | 	mutex_lock(&table_lock); | ||
138 | 	list_for_each(itr, &accel_table) { | ||
139 | 		struct adf_accel_dev *ptr = | ||
140 | 			list_entry(itr, struct adf_accel_dev, list); | ||
141 | 		if (ptr->accel_pci_dev.pci_dev == pci_dev) { | ||
142 | 			mutex_unlock(&table_lock); | ||
143 | 			return ptr; | ||
144 | 		} | ||
145 | 	} | ||
146 | 	mutex_unlock(&table_lock); | ||
147 | 	return NULL; | ||
148 | } | ||
149 | EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); | ||
150 | |||
151 | struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) | ||
152 | { | ||
153 | 	struct list_head *itr; | ||
154 | 	mutex_lock(&table_lock); | ||
155 | 	list_for_each(itr, &accel_table) { | ||
156 | 		struct adf_accel_dev *ptr = | ||
157 | 			list_entry(itr, struct adf_accel_dev, list); | ||
158 | 		if (ptr->accel_id == id) { | ||
159 | 			mutex_unlock(&table_lock); | ||
160 | 			return ptr; | ||
161 | 		} | ||
162 | 	} | ||
163 | 	mutex_unlock(&table_lock); | ||
164 | 	return NULL; | ||
165 | } | ||
166 | |||
167 | int adf_devmgr_verify_id(uint32_t id) | ||
168 | { | ||
169 | if (id == ADF_CFG_ALL_DEVICES) | ||
170 | return 0; | ||
171 | |||
172 | if (adf_devmgr_get_dev_by_id(id)) | ||
173 | return 0; | ||
174 | |||
175 | return -ENODEV; | ||
176 | } | ||
177 | |||
178 | void adf_devmgr_get_num_dev(uint32_t *num) | ||
179 | { | ||
180 | struct list_head *itr; | ||
181 | |||
182 | *num = 0; | ||
183 | list_for_each(itr, &accel_table) { | ||
184 | (*num)++; | ||
185 | } | ||
186 | } | ||
187 | |||
188 | int adf_dev_in_use(struct adf_accel_dev *accel_dev) | ||
189 | { | ||
190 | return atomic_read(&accel_dev->ref_count) != 0; | ||
191 | } | ||
192 | |||
193 | int adf_dev_get(struct adf_accel_dev *accel_dev) | ||
194 | { | ||
195 | if (atomic_add_return(1, &accel_dev->ref_count) == 1) | ||
196 | if (!try_module_get(accel_dev->owner)) | ||
197 | return -EFAULT; | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | void adf_dev_put(struct adf_accel_dev *accel_dev) | ||
202 | { | ||
203 | if (atomic_sub_return(1, &accel_dev->ref_count) == 0) | ||
204 | module_put(accel_dev->owner); | ||
205 | } | ||
206 | |||
207 | int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) | ||
208 | { | ||
209 | return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); | ||
210 | } | ||
211 | |||
212 | int adf_dev_started(struct adf_accel_dev *accel_dev) | ||
213 | { | ||
214 | return test_bit(ADF_STATUS_STARTED, &accel_dev->status); | ||
215 | } | ||
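The get/put pair above is the usage contract for the rest of the driver: take a reference, which also pins the owning module, before submitting work to a device, and drop it when done. A hedged sketch of a caller:

	#include "adf_common_drv.h"

	/* Illustrative caller only; error handling trimmed. */
	static int example_use_device(struct adf_accel_dev *accel_dev)
	{
		if (adf_dev_get(accel_dev))
			return -EFAULT;	/* owning module is unloading */

		/* ... submit requests to the device here ... */

		adf_dev_put(accel_dev);
		return 0;
	}
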
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c new file mode 100644 index 000000000000..5c0e47a00a87 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_init.c | |||
@@ -0,0 +1,388 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/list.h> | ||
49 | #include <linux/bitops.h> | ||
50 | #include <linux/delay.h> | ||
51 | #include "adf_accel_devices.h" | ||
52 | #include "adf_cfg.h" | ||
53 | #include "adf_common_drv.h" | ||
54 | |||
55 | static LIST_HEAD(service_table); | ||
56 | static DEFINE_MUTEX(service_lock); | ||
57 | |||
58 | static void adf_service_add(struct service_hndl *service) | ||
59 | { | ||
60 | mutex_lock(&service_lock); | ||
61 | list_add(&service->list, &service_table); | ||
62 | mutex_unlock(&service_lock); | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * adf_service_register() - Register acceleration service in the accel framework | ||
67 | * @service: Pointer to the service | ||
68 | * | ||
69 | * Function adds the acceleration service to the acceleration framework. | ||
70 | * To be used by QAT device specific drivers. | ||
71 | * | ||
72 | * Return: 0 on success, error code otherwise. | ||
73 | */ | ||
74 | int adf_service_register(struct service_hndl *service) | ||
75 | { | ||
76 | service->init_status = 0; | ||
77 | service->start_status = 0; | ||
78 | adf_service_add(service); | ||
79 | return 0; | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(adf_service_register); | ||
82 | |||
83 | static void adf_service_remove(struct service_hndl *service) | ||
84 | { | ||
85 | mutex_lock(&service_lock); | ||
86 | list_del(&service->list); | ||
87 | mutex_unlock(&service_lock); | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * adf_service_unregister() - Unregister acceleration service from the framework | ||
92 | * @service: Pointer to the service | ||
93 | * | ||
94 | * Function removes the acceleration service from the acceleration framework. | ||
95 | * To be used by QAT device specific drivers. | ||
96 | * | ||
97 | * Return: 0 on success, error code otherwise. | ||
98 | */ | ||
99 | int adf_service_unregister(struct service_hndl *service) | ||
100 | { | ||
101 | if (service->init_status || service->start_status) { | ||
102 | pr_err("QAT: Could not remove active service\n"); | ||
103 | return -EFAULT; | ||
104 | } | ||
105 | adf_service_remove(service); | ||
106 | return 0; | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(adf_service_unregister); | ||
109 | |||
110 | /** | ||
111 | * adf_dev_start() - Start acceleration service for the given accel device | ||
112 | * @accel_dev: Pointer to acceleration device. | ||
113 | * | ||
114 | * Function notifies all the registered services that the acceleration device | ||
115 | * is ready to be used. | ||
116 | * To be used by QAT device specific drivers. | ||
117 | * | ||
118 | * Return: 0 on success, error code otherwise. | ||
119 | */ | ||
120 | int adf_dev_start(struct adf_accel_dev *accel_dev) | ||
121 | { | ||
122 | struct service_hndl *service; | ||
123 | struct list_head *list_itr; | ||
124 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
125 | |||
126 | if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) { | ||
127 | pr_info("QAT: Device not configured\n"); | ||
128 | return -EFAULT; | ||
129 | } | ||
130 | set_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
131 | |||
132 | if (adf_ae_init(accel_dev)) { | ||
133 | pr_err("QAT: Failed to initialise Acceleration Engine\n"); | ||
134 | return -EFAULT; | ||
135 | } | ||
136 | set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); | ||
137 | |||
138 | if (adf_ae_fw_load(accel_dev)) { | ||
139 | pr_err("QAT: Failed to load acceleration FW\n"); | ||
140 | adf_ae_fw_release(accel_dev); | ||
141 | return -EFAULT; | ||
142 | } | ||
143 | set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); | ||
144 | |||
145 | if (hw_data->alloc_irq(accel_dev)) { | ||
146 | pr_err("QAT: Failed to allocate interrupts\n"); | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); | ||
150 | |||
151 | /* | ||
152 | * Subservice initialisation is divided into two stages: init and start. | ||
153 | * This is to facilitate any ordering dependencies between services | ||
154 | * prior to starting any of the accelerators. | ||
155 | */ | ||
156 | list_for_each(list_itr, &service_table) { | ||
157 | service = list_entry(list_itr, struct service_hndl, list); | ||
158 | if (!service->admin) | ||
159 | continue; | ||
160 | if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { | ||
161 | pr_err("QAT: Failed to initialise service %s\n", | ||
162 | service->name); | ||
163 | return -EFAULT; | ||
164 | } | ||
165 | set_bit(accel_dev->accel_id, &service->init_status); | ||
166 | } | ||
167 | list_for_each(list_itr, &service_table) { | ||
168 | service = list_entry(list_itr, struct service_hndl, list); | ||
169 | if (service->admin) | ||
170 | continue; | ||
171 | if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { | ||
172 | pr_err("QAT: Failed to initialise service %s\n", | ||
173 | service->name); | ||
174 | return -EFAULT; | ||
175 | } | ||
176 | set_bit(accel_dev->accel_id, &service->init_status); | ||
177 | } | ||
178 | |||
179 | hw_data->enable_error_correction(accel_dev); | ||
180 | |||
181 | if (adf_ae_start(accel_dev)) { | ||
182 | pr_err("QAT: AE Start Failed\n"); | ||
183 | return -EFAULT; | ||
184 | } | ||
185 | set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); | ||
186 | |||
187 | list_for_each(list_itr, &service_table) { | ||
188 | service = list_entry(list_itr, struct service_hndl, list); | ||
189 | if (!service->admin) | ||
190 | continue; | ||
191 | if (service->event_hld(accel_dev, ADF_EVENT_START)) { | ||
192 | pr_err("QAT: Failed to start service %s\n", | ||
193 | service->name); | ||
194 | return -EFAULT; | ||
195 | } | ||
196 | set_bit(accel_dev->accel_id, &service->start_status); | ||
197 | } | ||
198 | list_for_each(list_itr, &service_table) { | ||
199 | service = list_entry(list_itr, struct service_hndl, list); | ||
200 | if (service->admin) | ||
201 | continue; | ||
202 | if (service->event_hld(accel_dev, ADF_EVENT_START)) { | ||
203 | pr_err("QAT: Failed to start service %s\n", | ||
204 | service->name); | ||
205 | return -EFAULT; | ||
206 | } | ||
207 | set_bit(accel_dev->accel_id, &service->start_status); | ||
208 | } | ||
209 | |||
210 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
211 | set_bit(ADF_STATUS_STARTED, &accel_dev->status); | ||
212 | |||
213 | if (qat_algs_register()) { | ||
214 | pr_err("QAT: Failed to register crypto algs\n"); | ||
215 | set_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
216 | clear_bit(ADF_STATUS_STARTED, &accel_dev->status); | ||
217 | return -EFAULT; | ||
218 | } | ||
219 | return 0; | ||
220 | } | ||
221 | EXPORT_SYMBOL_GPL(adf_dev_start); | ||
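A device-specific driver would typically call this from its bring-up path once the device configuration is in place; the configuration step elsewhere in this patch is what sets ADF_STATUS_CONFIGURED. A sketch (function name hypothetical):

	static int my_dev_bringup(struct adf_accel_dev *accel_dev)
	{
		int ret;

		ret = adf_dev_start(accel_dev);
		if (ret) {
			/* adf_dev_stop() unwinds a partial start: it only
			 * releases what the status bits say was brought up. */
			adf_dev_stop(accel_dev);
			return ret;
		}
		return 0;
	}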
222 | |||
223 | /** | ||
224 | * adf_dev_stop() - Stop acceleration service for the given accel device | ||
225 | * @accel_dev: Pointer to acceleration device. | ||
226 | * | ||
227 | * Function notifies all the registered services that the acceleration device | ||
228 | * is shutting down. | ||
229 | * To be used by QAT device specific drivers. | ||
230 | * | ||
231 | * Return: 0 on success, error code otherwise. | ||
232 | */ | ||
233 | int adf_dev_stop(struct adf_accel_dev *accel_dev) | ||
234 | { | ||
235 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
236 | struct service_hndl *service; | ||
237 | struct list_head *list_itr; | ||
238 | int ret, wait = 0; | ||
239 | |||
240 | if (!adf_dev_started(accel_dev) && | ||
241 | !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { | ||
242 | return 0; | ||
243 | } | ||
244 | clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
245 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
246 | clear_bit(ADF_STATUS_STARTED, &accel_dev->status); | ||
247 | |||
248 | if (qat_algs_unregister()) | ||
249 | pr_err("QAT: Failed to unregister crypto algs\n"); | ||
250 | |||
251 | list_for_each(list_itr, &service_table) { | ||
252 | service = list_entry(list_itr, struct service_hndl, list); | ||
253 | if (service->admin) | ||
254 | continue; | ||
255 | if (!test_bit(accel_dev->accel_id, &service->start_status)) | ||
256 | continue; | ||
257 | ret = service->event_hld(accel_dev, ADF_EVENT_STOP); | ||
258 | if (!ret) { | ||
259 | clear_bit(accel_dev->accel_id, &service->start_status); | ||
260 | } else if (ret == -EAGAIN) { | ||
261 | wait = 1; | ||
262 | clear_bit(accel_dev->accel_id, &service->start_status); | ||
263 | } | ||
264 | } | ||
265 | list_for_each(list_itr, &service_table) { | ||
266 | service = list_entry(list_itr, struct service_hndl, list); | ||
267 | if (!service->admin) | ||
268 | continue; | ||
269 | if (!test_bit(accel_dev->accel_id, &service->start_status)) | ||
270 | continue; | ||
271 | if (service->event_hld(accel_dev, ADF_EVENT_STOP)) | ||
272 | pr_err("QAT: Failed to shutdown service %s\n", | ||
273 | service->name); | ||
274 | else | ||
275 | clear_bit(accel_dev->accel_id, &service->start_status); | ||
276 | } | ||
277 | |||
278 | if (wait) | ||
279 | msleep(100); | ||
280 | |||
281 | if (adf_dev_started(accel_dev)) { | ||
282 | if (adf_ae_stop(accel_dev)) | ||
283 | pr_err("QAT: failed to stop AE\n"); | ||
284 | else | ||
285 | clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); | ||
286 | } | ||
287 | |||
288 | if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { | ||
289 | if (adf_ae_fw_release(accel_dev)) | ||
290 | pr_err("QAT: Failed to release the ucode\n"); | ||
291 | else | ||
292 | clear_bit(ADF_STATUS_AE_UCODE_LOADED, | ||
293 | &accel_dev->status); | ||
294 | } | ||
295 | |||
296 | if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) { | ||
297 | if (adf_ae_shutdown(accel_dev)) | ||
298 | pr_err("QAT: Failed to shutdown Accel Engine\n"); | ||
299 | else | ||
300 | clear_bit(ADF_STATUS_AE_INITIALISED, | ||
301 | &accel_dev->status); | ||
302 | } | ||
303 | |||
304 | list_for_each(list_itr, &service_table) { | ||
305 | service = list_entry(list_itr, struct service_hndl, list); | ||
306 | if (service->admin) | ||
307 | continue; | ||
308 | if (!test_bit(accel_dev->accel_id, &service->init_status)) | ||
309 | continue; | ||
310 | if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) | ||
311 | pr_err("QAT: Failed to shutdown service %s\n", | ||
312 | service->name); | ||
313 | else | ||
314 | clear_bit(accel_dev->accel_id, &service->init_status); | ||
315 | } | ||
316 | list_for_each(list_itr, &service_table) { | ||
317 | service = list_entry(list_itr, struct service_hndl, list); | ||
318 | if (!service->admin) | ||
319 | continue; | ||
320 | if (!test_bit(accel_dev->accel_id, &service->init_status)) | ||
321 | continue; | ||
322 | if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) | ||
323 | pr_err("QAT: Failed to shutdown service %s\n", | ||
324 | service->name); | ||
325 | else | ||
326 | clear_bit(accel_dev->accel_id, &service->init_status); | ||
327 | } | ||
328 | |||
329 | if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { | ||
330 | hw_data->free_irq(accel_dev); | ||
331 | clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); | ||
332 | } | ||
333 | |||
334 | /* Delete configuration only if not restarting */ | ||
335 | if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) | ||
336 | adf_cfg_del_all(accel_dev); | ||
337 | |||
338 | return 0; | ||
339 | } | ||
340 | EXPORT_SYMBOL_GPL(adf_dev_stop); | ||
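A service that still has requests in flight can stall the stop sequence briefly: returning -EAGAIN from its ADF_EVENT_STOP handler makes adf_dev_stop() sleep 100 ms before stopping the acceleration engines. A sketch under that assumption (the pending counter is hypothetical):

	static atomic_t my_srv_pending;

	static int my_srv_event_handler(struct adf_accel_dev *accel_dev,
					enum adf_event event)
	{
		if (event == ADF_EVENT_STOP &&
		    atomic_read(&my_srv_pending))
			return -EAGAIN;	/* grace period before AE stop */
		return 0;
	}

Note that the start bit is cleared in the -EAGAIN case too, so this buys time but does not veto the stop.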
341 | |||
342 | int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) | ||
343 | { | ||
344 | struct service_hndl *service; | ||
345 | struct list_head *list_itr; | ||
346 | |||
347 | list_for_each(list_itr, &service_table) { | ||
348 | service = list_entry(list_itr, struct service_hndl, list); | ||
349 | if (service->admin) | ||
350 | continue; | ||
351 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) | ||
352 | pr_err("QAT: Failed to restart service %s.\n", | ||
353 | service->name); | ||
354 | } | ||
355 | list_for_each(list_itr, &service_table) { | ||
356 | service = list_entry(list_itr, struct service_hndl, list); | ||
357 | if (!service->admin) | ||
358 | continue; | ||
359 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) | ||
360 | pr_err("QAT: Failed to restart service %s.\n", | ||
361 | service->name); | ||
362 | } | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) | ||
367 | { | ||
368 | struct service_hndl *service; | ||
369 | struct list_head *list_itr; | ||
370 | |||
371 | list_for_each(list_itr, &service_table) { | ||
372 | service = list_entry(list_itr, struct service_hndl, list); | ||
373 | if (service->admin) | ||
374 | continue; | ||
375 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) | ||
376 | pr_err("QAT: Failed to restart service %s.\n", | ||
377 | service->name); | ||
378 | } | ||
379 | list_for_each(list_itr, &service_table) { | ||
380 | service = list_entry(list_itr, struct service_hndl, list); | ||
381 | if (!service->admin) | ||
382 | continue; | ||
383 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) | ||
384 | pr_err("QAT: Failed to restart service %s.\n", | ||
385 | service->name); | ||
386 | } | ||
387 | return 0; | ||
388 | } | ||
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c new file mode 100644 index 000000000000..5f3fa45348b4 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport.c | |||
@@ -0,0 +1,567 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/delay.h> | ||
48 | #include "adf_accel_devices.h" | ||
49 | #include "adf_transport_internal.h" | ||
50 | #include "adf_transport_access_macros.h" | ||
51 | #include "adf_cfg.h" | ||
52 | #include "adf_common_drv.h" | ||
53 | |||
54 | static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) | ||
55 | { | ||
56 | uint32_t div = data >> shift; | ||
57 | uint32_t mult = div << shift; | ||
58 | |||
59 | return data - mult; | ||
60 | } | ||
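adf_modulo() is a power-of-two modulo without a divide: the two shifts round data down to a multiple of 2^shift and the subtraction keeps the remainder, e.g. adf_modulo(70, 6) = 70 - ((70 >> 6) << 6) = 70 - 64 = 6, i.e. 70 mod 64. The head/tail arithmetic below relies on this, passing ADF_RING_SIZE_MODULO(ring_size) as the shift because a ring with size code n is 2^(n+6) bytes long.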
61 | |||
62 | static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) | ||
63 | { | ||
64 | if (((size - 1) & addr) != 0) | ||
65 | return -EFAULT; | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) | ||
70 | { | ||
71 | int i = ADF_MIN_RING_SIZE; | ||
72 | |||
73 | for (; i <= ADF_MAX_RING_SIZE; i++) | ||
74 | if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) | ||
75 | return i; | ||
76 | |||
77 | return ADF_DEFAULT_RING_SIZE; | ||
78 | } | ||
79 | |||
80 | static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) | ||
81 | { | ||
82 | spin_lock(&bank->lock); | ||
83 | if (bank->ring_mask & (1 << ring)) { | ||
84 | spin_unlock(&bank->lock); | ||
85 | return -EFAULT; | ||
86 | } | ||
87 | bank->ring_mask |= (1 << ring); | ||
88 | spin_unlock(&bank->lock); | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) | ||
93 | { | ||
94 | spin_lock(&bank->lock); | ||
95 | bank->ring_mask &= ~(1 << ring); | ||
96 | spin_unlock(&bank->lock); | ||
97 | } | ||
98 | |||
99 | static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) | ||
100 | { | ||
101 | spin_lock_bh(&bank->lock); | ||
102 | bank->irq_mask |= (1 << ring); | ||
103 | spin_unlock_bh(&bank->lock); | ||
104 | WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); | ||
105 | WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number, | ||
106 | bank->irq_coalesc_timer); | ||
107 | } | ||
108 | |||
109 | static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) | ||
110 | { | ||
111 | spin_lock_bh(&bank->lock); | ||
112 | bank->irq_mask &= ~(1 << ring); | ||
113 | spin_unlock_bh(&bank->lock); | ||
114 | WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); | ||
115 | } | ||
116 | |||
117 | int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) | ||
118 | { | ||
119 | if (atomic_add_return(1, ring->inflights) > | ||
120 | ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { | ||
121 | atomic_dec(ring->inflights); | ||
122 | return -EAGAIN; | ||
123 | } | ||
124 | spin_lock_bh(&ring->lock); | ||
125 | memcpy(ring->base_addr + ring->tail, msg, | ||
126 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); | ||
127 | |||
128 | ring->tail = adf_modulo(ring->tail + | ||
129 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size), | ||
130 | ADF_RING_SIZE_MODULO(ring->ring_size)); | ||
131 | WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number, | ||
132 | ring->ring_number, ring->tail); | ||
133 | spin_unlock_bh(&ring->lock); | ||
134 | return 0; | ||
135 | } | ||
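A caller sketch (names hypothetical): the buffer must match the message size the ring was created with, and on -EAGAIN, meaning all inflight slots are taken, callers are expected to back off and retry:

	uint32_t req[16];	/* one 64-byte request descriptor */
	int ret, retries = 10;

	do {
		ret = adf_send_message(tx_ring, req);
	} while (ret == -EAGAIN && retries--);
	if (ret == -EAGAIN)
		return -EBUSY;	/* ring stayed full */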
136 | |||
137 | static int adf_handle_response(struct adf_etr_ring_data *ring) | ||
138 | { | ||
139 | uint32_t msg_counter = 0; | ||
140 | uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head); | ||
141 | |||
142 | while (*msg != ADF_RING_EMPTY_SIG) { | ||
143 | ring->callback((uint32_t *)msg); | ||
144 | *msg = ADF_RING_EMPTY_SIG; | ||
145 | ring->head = adf_modulo(ring->head + | ||
146 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size), | ||
147 | ADF_RING_SIZE_MODULO(ring->ring_size)); | ||
148 | msg_counter++; | ||
149 | msg = (uint32_t *)(ring->base_addr + ring->head); | ||
150 | } | ||
151 | if (msg_counter > 0) { | ||
152 | WRITE_CSR_RING_HEAD(ring->bank->csr_addr, | ||
153 | ring->bank->bank_number, | ||
154 | ring->ring_number, ring->head); | ||
155 | atomic_sub(msg_counter, ring->inflights); | ||
156 | } | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) | ||
161 | { | ||
162 | uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size); | ||
163 | |||
164 | WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, | ||
165 | ring->ring_number, ring_config); | ||
166 | } | ||
167 | |||
168 | static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) | ||
169 | { | ||
170 | uint32_t ring_config = | ||
171 | BUILD_RESP_RING_CONFIG(ring->ring_size, | ||
172 | ADF_RING_NEAR_WATERMARK_512, | ||
173 | ADF_RING_NEAR_WATERMARK_0); | ||
174 | |||
175 | WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, | ||
176 | ring->ring_number, ring_config); | ||
177 | } | ||
178 | |||
179 | static int adf_init_ring(struct adf_etr_ring_data *ring) | ||
180 | { | ||
181 | struct adf_etr_bank_data *bank = ring->bank; | ||
182 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
183 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
184 | uint64_t ring_base; | ||
185 | uint32_t ring_size_bytes = | ||
186 | ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); | ||
187 | |||
188 | ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); | ||
189 | ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev), | ||
190 | ring_size_bytes, &ring->dma_addr, | ||
191 | GFP_KERNEL); | ||
192 | if (!ring->base_addr) | ||
193 | return -ENOMEM; | ||
194 | |||
195 | memset(ring->base_addr, 0x7F, ring_size_bytes); | ||
196 | /* The ring's DMA address has to be aligned to the ring size */ | ||
197 | if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) { | ||
198 | pr_err("QAT: Ring address not aligned\n"); | ||
199 | dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, | ||
200 | ring->base_addr, ring->dma_addr); | ||
201 | return -EFAULT; | ||
202 | } | ||
203 | |||
204 | if (hw_data->tx_rings_mask & (1 << ring->ring_number)) | ||
205 | adf_configure_tx_ring(ring); | ||
207 | else | ||
208 | adf_configure_rx_ring(ring); | ||
209 | |||
210 | ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size); | ||
211 | WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number, | ||
212 | ring->ring_number, ring_base); | ||
213 | spin_lock_init(&ring->lock); | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static void adf_cleanup_ring(struct adf_etr_ring_data *ring) | ||
218 | { | ||
219 | uint32_t ring_size_bytes = | ||
220 | ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); | ||
221 | |||
222 | ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); | ||
223 | if (ring->base_addr) { | ||
224 | memset(ring->base_addr, 0x7F, ring_size_bytes); | ||
225 | dma_free_coherent(&GET_DEV(ring->bank->accel_dev), | ||
226 | ring_size_bytes, ring->base_addr, | ||
227 | ring->dma_addr); | ||
228 | } | ||
229 | } | ||
230 | |||
231 | int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, | ||
232 | uint32_t bank_num, uint32_t num_msgs, | ||
233 | uint32_t msg_size, const char *ring_name, | ||
234 | adf_callback_fn callback, int poll_mode, | ||
235 | struct adf_etr_ring_data **ring_ptr) | ||
236 | { | ||
237 | struct adf_etr_data *transport_data = accel_dev->transport; | ||
238 | struct adf_etr_bank_data *bank; | ||
239 | struct adf_etr_ring_data *ring; | ||
240 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
241 | uint32_t ring_num; | ||
242 | int ret; | ||
243 | |||
244 | if (bank_num >= GET_MAX_BANKS(accel_dev)) { | ||
245 | pr_err("QAT: Invalid bank number\n"); | ||
246 | return -EFAULT; | ||
247 | } | ||
248 | if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { | ||
249 | pr_err("QAT: Invalid msg size\n"); | ||
250 | return -EFAULT; | ||
251 | } | ||
252 | if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs), | ||
253 | ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) { | ||
254 | pr_err("QAT: Invalid ring size for given msg size\n"); | ||
255 | return -EFAULT; | ||
256 | } | ||
257 | if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) { | ||
258 | pr_err("QAT: Section %s, no such entry : %s\n", | ||
259 | section, ring_name); | ||
260 | return -EFAULT; | ||
261 | } | ||
262 | if (kstrtouint(val, 10, &ring_num)) { | ||
263 | pr_err("QAT: Can't get ring number\n"); | ||
264 | return -EFAULT; | ||
265 | } | ||
266 | |||
267 | bank = &transport_data->banks[bank_num]; | ||
268 | if (adf_reserve_ring(bank, ring_num)) { | ||
269 | pr_err("QAT: Ring %d, %s already exists.\n", | ||
270 | ring_num, ring_name); | ||
271 | return -EFAULT; | ||
272 | } | ||
273 | ring = &bank->rings[ring_num]; | ||
274 | ring->ring_number = ring_num; | ||
275 | ring->bank = bank; | ||
276 | ring->callback = callback; | ||
277 | ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size); | ||
278 | ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); | ||
279 | ring->head = 0; | ||
280 | ring->tail = 0; | ||
281 | atomic_set(ring->inflights, 0); | ||
282 | ret = adf_init_ring(ring); | ||
283 | if (ret) | ||
284 | goto err; | ||
285 | |||
286 | /* Enable HW arbitration for the given ring */ | ||
287 | accel_dev->hw_device->hw_arb_ring_enable(ring); | ||
288 | |||
289 | if (adf_ring_debugfs_add(ring, ring_name)) { | ||
290 | pr_err("QAT: Couldn't add ring debugfs entry\n"); | ||
291 | ret = -EFAULT; | ||
292 | goto err; | ||
293 | } | ||
294 | |||
295 | /* Enable interrupts if needed */ | ||
296 | if (callback && (!poll_mode)) | ||
297 | adf_enable_ring_irq(bank, ring->ring_number); | ||
298 | *ring_ptr = ring; | ||
299 | return 0; | ||
300 | err: | ||
301 | adf_cleanup_ring(ring); | ||
302 | adf_unreserve_ring(bank, ring_num); | ||
303 | accel_dev->hw_device->hw_arb_ring_disable(ring); | ||
304 | return ret; | ||
305 | } | ||
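A usage sketch, assuming the config section already holds ring-number entries under the given keys (section and key names are illustrative only):

	struct adf_etr_ring_data *sym_tx, *sym_rx;
	int ret;

	/* tx ring: no callback, responses arrive on the paired rx ring */
	ret = adf_create_ring(accel_dev, "SSL", bank_nr, 512, 128,
			      "RingSymTx", NULL, 0, &sym_tx);
	if (ret)
		return ret;
	/* rx ring: a callback plus poll_mode 0 enables its IRQ */
	ret = adf_create_ring(accel_dev, "SSL", bank_nr, 512, 64,
			      "RingSymRx", my_resp_callback, 0, &sym_rx);

Here my_resp_callback has the adf_callback_fn signature from adf_transport.h and is invoked from the bottom half for every response message.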
306 | |||
307 | void adf_remove_ring(struct adf_etr_ring_data *ring) | ||
308 | { | ||
309 | struct adf_etr_bank_data *bank = ring->bank; | ||
310 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
311 | |||
312 | /* Disable interrupts for the given ring */ | ||
313 | adf_disable_ring_irq(bank, ring->ring_number); | ||
314 | |||
315 | /* Clear the ring's CSR configuration and base registers */ | ||
316 | WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number, | ||
317 | ring->ring_number, 0); | ||
318 | WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number, | ||
319 | ring->ring_number, 0); | ||
320 | adf_ring_debugfs_rm(ring); | ||
321 | adf_unreserve_ring(bank, ring->ring_number); | ||
322 | /* Disable HW arbitration for the given ring */ | ||
323 | accel_dev->hw_device->hw_arb_ring_disable(ring); | ||
324 | adf_cleanup_ring(ring); | ||
325 | } | ||
326 | |||
327 | static void adf_ring_response_handler(struct adf_etr_bank_data *bank) | ||
328 | { | ||
329 | uint32_t empty_rings, i; | ||
330 | |||
331 | empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); | ||
332 | empty_rings = ~empty_rings & bank->irq_mask; | ||
333 | |||
334 | for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) { | ||
335 | if (empty_rings & (1 << i)) | ||
336 | adf_handle_response(&bank->rings[i]); | ||
337 | } | ||
338 | } | ||
339 | |||
340 | /** | ||
341 | * adf_response_handler() - Bottom half response handler | ||
342 | * @bank_addr: Address of the ring bank for which the BH was scheduled. | ||
343 | * | ||
344 | * Function is the bottom half handler for the response from acceleration | ||
345 | * device. There is one handler for every ring bank. Function checks all | ||
346 | * communication rings in the bank. | ||
347 | * To be used by QAT device specific drivers. | ||
348 | * | ||
349 | * Return: void | ||
350 | */ | ||
351 | void adf_response_handler(unsigned long bank_addr) | ||
352 | { | ||
353 | struct adf_etr_bank_data *bank = (void *)bank_addr; | ||
354 | |||
355 | /* Handle all the responses and re-enable IRQs */ | ||
356 | adf_ring_response_handler(bank); | ||
357 | WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, | ||
358 | bank->irq_mask); | ||
359 | } | ||
360 | EXPORT_SYMBOL_GPL(adf_response_handler); | ||
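The hookup lives in the device-specific ISR code rather than in this file; each bank's tasklet would be bound to this handler roughly as follows (a sketch; "resp_hanlder" (sic) is the tasklet declared in adf_transport_internal.h below):

	#include <linux/interrupt.h>

	static void my_bank_bh_setup(struct adf_etr_bank_data *bank)
	{
		tasklet_init(&bank->resp_hanlder, adf_response_handler,
			     (unsigned long)bank);
	}

The top half then calls tasklet_hi_schedule(&bank->resp_hanlder) when the bank raises an interrupt, and this handler re-arms the IRQ via the WRITE_CSR_INT_FLAG_AND_COL write above once the rings are drained.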
361 | |||
362 | static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, | ||
363 | const char *section, const char *format, | ||
364 | uint32_t key, uint32_t *value) | ||
365 | { | ||
366 | char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
367 | char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
368 | |||
369 | snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key); | ||
370 | |||
371 | if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf)) | ||
372 | return -EFAULT; | ||
373 | |||
374 | if (kstrtouint(val_buf, 10, value)) | ||
375 | return -EFAULT; | ||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static void adf_enable_coalesc(struct adf_etr_bank_data *bank, | ||
380 | const char *section, uint32_t bank_num_in_accel) | ||
381 | { | ||
382 | if (adf_get_cfg_int(bank->accel_dev, section, | ||
383 | ADF_ETRMGR_COALESCE_TIMER_FORMAT, | ||
384 | bank_num_in_accel, &bank->irq_coalesc_timer)) | ||
385 | bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME; | ||
386 | |||
387 | if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer || | ||
388 | ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer) | ||
389 | bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME; | ||
390 | } | ||
391 | |||
392 | static int adf_init_bank(struct adf_accel_dev *accel_dev, | ||
393 | struct adf_etr_bank_data *bank, | ||
394 | uint32_t bank_num, void __iomem *csr_addr) | ||
395 | { | ||
396 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
397 | struct adf_etr_ring_data *ring; | ||
398 | struct adf_etr_ring_data *tx_ring; | ||
399 | uint32_t i, coalesc_enabled; | ||
400 | |||
401 | memset(bank, 0, sizeof(*bank)); | ||
402 | bank->bank_number = bank_num; | ||
403 | bank->csr_addr = csr_addr; | ||
404 | bank->accel_dev = accel_dev; | ||
405 | spin_lock_init(&bank->lock); | ||
406 | |||
407 | /* Always enable IRQ coalescing; this allows use of the | ||
408 |  * optimised flag and coalescing register. If coalescing is | ||
409 |  * disabled in the config file, just use the min time value. */ | ||
410 | if (!adf_get_cfg_int(accel_dev, "Accelerator0", | ||
411 | ADF_ETRMGR_COALESCING_ENABLED_FORMAT, | ||
412 | bank_num, &coalesc_enabled) && coalesc_enabled) | ||
413 | adf_enable_coalesc(bank, "Accelerator0", bank_num); | ||
414 | else | ||
415 | bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME; | ||
416 | |||
417 | for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { | ||
418 | WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0); | ||
419 | WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); | ||
420 | ring = &bank->rings[i]; | ||
421 | if (hw_data->tx_rings_mask & (1 << i)) { | ||
422 | ring->inflights = kzalloc_node(sizeof(atomic_t), | ||
423 | GFP_KERNEL, | ||
424 | accel_dev->numa_node); | ||
425 | if (!ring->inflights) | ||
426 | goto err; | ||
427 | } else { | ||
428 | if (i < hw_data->tx_rx_gap) { | ||
429 | pr_err("QAT: Invalid tx rings mask config\n"); | ||
430 | goto err; | ||
431 | } | ||
432 | tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; | ||
433 | ring->inflights = tx_ring->inflights; | ||
434 | } | ||
435 | } | ||
436 | if (adf_bank_debugfs_add(bank)) { | ||
437 | pr_err("QAT: Failed to add bank debugfs entry\n"); | ||
438 | goto err; | ||
439 | } | ||
440 | |||
441 | WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); | ||
442 | return 0; | ||
443 | err: | ||
444 | for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { | ||
445 | ring = &bank->rings[i]; | ||
446 | if (hw_data->tx_rings_mask & (1 << i) && ring->inflights) | ||
447 | kfree(ring->inflights); | ||
448 | } | ||
449 | return -ENOMEM; | ||
450 | } | ||
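Note the inflight sharing set up here: a response (rx) ring reuses the atomic counter allocated for its partner request (tx) ring tx_rx_gap slots below it, so adf_send_message() and adf_handle_response() increment and decrement the same counter. For example, with tx_rings_mask 0xFF and tx_rx_gap 8 (the values the DH895xCC portion of this patch uses), ring 8 shares ring 0's counter.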
451 | |||
452 | /** | ||
453 | * adf_init_etr_data() - Initialize transport rings for acceleration device | ||
454 | * @accel_dev: Pointer to acceleration device. | ||
455 | * | ||
456 | * Function initializes the communication channels (rings) of the | ||
457 | * acceleration device accel_dev. | ||
458 | * To be used by QAT device specific drivers. | ||
459 | * | ||
460 | * Return: 0 on success, error code otherwise. | ||
461 | */ | ||
462 | int adf_init_etr_data(struct adf_accel_dev *accel_dev) | ||
463 | { | ||
464 | struct adf_etr_data *etr_data; | ||
465 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
466 | void __iomem *csr_addr; | ||
467 | uint32_t size; | ||
468 | uint32_t num_banks = 0; | ||
469 | int i, ret; | ||
470 | |||
471 | etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, | ||
472 | accel_dev->numa_node); | ||
473 | if (!etr_data) | ||
474 | return -ENOMEM; | ||
475 | |||
476 | num_banks = GET_MAX_BANKS(accel_dev); | ||
477 | size = num_banks * sizeof(struct adf_etr_bank_data); | ||
478 | etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node); | ||
479 | if (!etr_data->banks) { | ||
480 | ret = -ENOMEM; | ||
481 | goto err_bank; | ||
482 | } | ||
483 | |||
484 | accel_dev->transport = etr_data; | ||
485 | i = hw_data->get_etr_bar_id(hw_data); | ||
486 | csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr; | ||
487 | |||
488 | /* accel_dev->debugfs_dir should always be non-NULL here */ | ||
489 | etr_data->debug = debugfs_create_dir("transport", | ||
490 | accel_dev->debugfs_dir); | ||
491 | if (!etr_data->debug) { | ||
492 | pr_err("QAT: Unable to create transport debugfs entry\n"); | ||
493 | ret = -ENOENT; | ||
494 | goto err_bank_debug; | ||
495 | } | ||
496 | |||
497 | for (i = 0; i < num_banks; i++) { | ||
498 | ret = adf_init_bank(accel_dev, &etr_data->banks[i], i, | ||
499 | csr_addr); | ||
500 | if (ret) | ||
501 | goto err_bank_all; | ||
502 | } | ||
503 | |||
504 | return 0; | ||
505 | |||
506 | err_bank_all: | ||
507 | debugfs_remove(etr_data->debug); | ||
508 | err_bank_debug: | ||
509 | kfree(etr_data->banks); | ||
510 | err_bank: | ||
511 | kfree(etr_data); | ||
512 | accel_dev->transport = NULL; | ||
513 | return ret; | ||
514 | } | ||
515 | EXPORT_SYMBOL_GPL(adf_init_etr_data); | ||
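Callers pair this with adf_cleanup_etr_data() below; since the cleanup checks accel_dev->transport for NULL, it is safe to reach from any point of a failed bring-up. A sketch (rest_of_bringup is hypothetical):

	ret = adf_init_etr_data(accel_dev);
	if (ret)
		goto out;
	ret = rest_of_bringup(accel_dev);
	if (ret)
		goto out_cleanup;
	return 0;
	out_cleanup:
	adf_cleanup_etr_data(accel_dev);
	out:
	return ret;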
516 | |||
517 | static void cleanup_bank(struct adf_etr_bank_data *bank) | ||
518 | { | ||
519 | uint32_t i; | ||
520 | |||
521 | for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { | ||
522 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
523 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
524 | struct adf_etr_ring_data *ring = &bank->rings[i]; | ||
525 | |||
526 | if (bank->ring_mask & (1 << i)) | ||
527 | adf_cleanup_ring(ring); | ||
528 | |||
529 | if (hw_data->tx_rings_mask & (1 << i)) | ||
530 | kfree(ring->inflights); | ||
531 | } | ||
532 | adf_bank_debugfs_rm(bank); | ||
533 | memset(bank, 0, sizeof(*bank)); | ||
534 | } | ||
535 | |||
536 | static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev) | ||
537 | { | ||
538 | struct adf_etr_data *etr_data = accel_dev->transport; | ||
539 | uint32_t i, num_banks = GET_MAX_BANKS(accel_dev); | ||
540 | |||
541 | for (i = 0; i < num_banks; i++) | ||
542 | cleanup_bank(&etr_data->banks[i]); | ||
543 | } | ||
544 | |||
545 | /** | ||
546 | * adf_cleanup_etr_data() - Clear transport rings for acceleration device | ||
547 | * @accel_dev: Pointer to acceleration device. | ||
548 | * | ||
549 | * Function clears the communication channels (rings) of the | ||
550 | * acceleration device accel_dev. | ||
551 | * To be used by QAT device specific drivers. | ||
552 | * | ||
553 | * Return: void | ||
554 | */ | ||
555 | void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev) | ||
556 | { | ||
557 | struct adf_etr_data *etr_data = accel_dev->transport; | ||
558 | |||
559 | if (etr_data) { | ||
560 | adf_cleanup_etr_handles(accel_dev); | ||
561 | debugfs_remove(etr_data->debug); | ||
562 | kfree(etr_data->banks); | ||
563 | kfree(etr_data); | ||
564 | accel_dev->transport = NULL; | ||
565 | } | ||
566 | } | ||
567 | EXPORT_SYMBOL_GPL(adf_cleanup_etr_data); | ||
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h new file mode 100644 index 000000000000..386485bd9c95 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_TRANSPORT_H | ||
48 | #define ADF_TRANSPORT_H | ||
49 | |||
50 | #include "adf_accel_devices.h" | ||
51 | |||
52 | struct adf_etr_ring_data; | ||
53 | |||
54 | typedef void (*adf_callback_fn)(void *resp_msg); | ||
55 | |||
56 | int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, | ||
57 | uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size, | ||
58 | const char *ring_name, adf_callback_fn callback, | ||
59 | int poll_mode, struct adf_etr_ring_data **ring_ptr); | ||
60 | |||
61 | int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg); | ||
62 | void adf_remove_ring(struct adf_etr_ring_data *ring); | ||
63 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h new file mode 100644 index 000000000000..91d88d676580 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_TRANSPORT_ACCESS_MACROS_H | ||
48 | #define ADF_TRANSPORT_ACCESS_MACROS_H | ||
49 | |||
50 | #include "adf_accel_devices.h" | ||
51 | #define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL | ||
52 | #define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL | ||
53 | #define ADF_RING_CSR_RING_CONFIG 0x000 | ||
54 | #define ADF_RING_CSR_RING_LBASE 0x040 | ||
55 | #define ADF_RING_CSR_RING_UBASE 0x080 | ||
56 | #define ADF_RING_CSR_RING_HEAD 0x0C0 | ||
57 | #define ADF_RING_CSR_RING_TAIL 0x100 | ||
58 | #define ADF_RING_CSR_E_STAT 0x14C | ||
59 | #define ADF_RING_CSR_INT_SRCSEL 0x174 | ||
60 | #define ADF_RING_CSR_INT_SRCSEL_2 0x178 | ||
61 | #define ADF_RING_CSR_INT_COL_EN 0x17C | ||
62 | #define ADF_RING_CSR_INT_COL_CTL 0x180 | ||
63 | #define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 | ||
64 | #define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 | ||
65 | #define ADF_RING_BUNDLE_SIZE 0x1000 | ||
66 | #define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A | ||
67 | #define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05 | ||
68 | #define ADF_COALESCING_MIN_TIME 0x1FF | ||
69 | #define ADF_COALESCING_MAX_TIME 0xFFFFF | ||
70 | #define ADF_COALESCING_DEF_TIME 0x27FF | ||
71 | #define ADF_RING_NEAR_WATERMARK_512 0x08 | ||
72 | #define ADF_RING_NEAR_WATERMARK_0 0x00 | ||
73 | #define ADF_RING_EMPTY_SIG 0x7F7F7F7F | ||
74 | |||
75 | /* Valid internal ring size values */ | ||
76 | #define ADF_RING_SIZE_128 0x01 | ||
77 | #define ADF_RING_SIZE_256 0x02 | ||
78 | #define ADF_RING_SIZE_512 0x03 | ||
79 | #define ADF_RING_SIZE_4K 0x06 | ||
80 | #define ADF_RING_SIZE_16K 0x08 | ||
81 | #define ADF_RING_SIZE_4M 0x10 | ||
82 | #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128 | ||
83 | #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M | ||
84 | #define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K | ||
85 | |||
86 | /* Valid internal msg size values */ | ||
87 | #define ADF_MSG_SIZE_32 0x01 | ||
88 | #define ADF_MSG_SIZE_64 0x02 | ||
89 | #define ADF_MSG_SIZE_128 0x04 | ||
90 | #define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32 | ||
91 | #define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128 | ||
92 | |||
93 | /* Size to bytes conversion macros for ring and msg values */ | ||
94 | #define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5) | ||
95 | #define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5) | ||
96 | #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) | ||
97 | #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) | ||
98 | |||
99 | /* Minimum ring buffer size for memory allocation */ | ||
100 | #define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \ | ||
101 | ADF_RING_SIZE_4K : SIZE) | ||
102 | #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) | ||
103 | #define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ | ||
104 | ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1) | ||
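To sanity-check the encodings: ADF_RING_SIZE_512 is 0x03, so ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x03) = (1 << 2) << 7 = 512 bytes; ADF_MSG_SIZE_64 is 0x02, so ADF_MSG_SIZE_TO_BYTES(0x02) = 2 << 5 = 64 bytes; and the default ADF_RING_SIZE_16K (0x08) gives (1 << 7) << 7 = 16384 bytes. A ring with size code n is therefore 2^(n+6) bytes, which is why ADF_RING_SIZE_MODULO(n) = n + 6 yields the shift that adf_transport.c feeds to adf_modulo().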
105 | #define BUILD_RING_CONFIG(size) \ | ||
106 | ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \ | ||
107 | | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ | ||
108 | | size) | ||
109 | #define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \ | ||
110 | ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \ | ||
111 | | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ | ||
112 | | size) | ||
113 | #define BUILD_RING_BASE_ADDR(addr, size) \ | ||
114 | ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size)) | ||
115 | #define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ | ||
116 | ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
117 | ADF_RING_CSR_RING_HEAD + (ring << 2)) | ||
118 | #define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ | ||
119 | ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
120 | ADF_RING_CSR_RING_TAIL + (ring << 2)) | ||
121 | #define READ_CSR_E_STAT(csr_base_addr, bank) \ | ||
122 | ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
123 | ADF_RING_CSR_E_STAT) | ||
124 | #define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ | ||
125 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
126 | ADF_RING_CSR_RING_CONFIG + (ring << 2), value) | ||
127 | #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ | ||
128 | do { \ | ||
129 | uint32_t l_base = 0, u_base = 0; \ | ||
130 | l_base = (uint32_t)(value & 0xFFFFFFFF); \ | ||
131 | u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ | ||
132 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
133 | ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ | ||
134 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
135 | ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \ | ||
136 | } while (0) | ||
137 | #define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ | ||
138 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
139 | ADF_RING_CSR_RING_HEAD + (ring << 2), value) | ||
140 | #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ | ||
141 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
142 | ADF_RING_CSR_RING_TAIL + (ring << 2), value) | ||
143 | #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ | ||
144 | do { \ | ||
145 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
146 | ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ | ||
147 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
148 | ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ | ||
149 | } while (0) | ||
150 | #define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ | ||
151 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
152 | ADF_RING_CSR_INT_COL_EN, value) | ||
153 | #define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ | ||
154 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
155 | ADF_RING_CSR_INT_COL_CTL, \ | ||
156 | ADF_RING_CSR_INT_COL_CTL_ENABLE | value) | ||
157 | #define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ | ||
158 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | ||
159 | ADF_RING_CSR_INT_FLAG_AND_COL, value) | ||
160 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c new file mode 100644 index 000000000000..6b6974553514 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/seq_file.h> | ||
50 | #include "adf_accel_devices.h" | ||
51 | #include "adf_transport_internal.h" | ||
52 | #include "adf_transport_access_macros.h" | ||
53 | |||
54 | static DEFINE_MUTEX(ring_read_lock); | ||
55 | static DEFINE_MUTEX(bank_read_lock); | ||
56 | |||
57 | static void *adf_ring_start(struct seq_file *sfile, loff_t *pos) | ||
58 | { | ||
59 | struct adf_etr_ring_data *ring = sfile->private; | ||
60 | |||
61 | mutex_lock(&ring_read_lock); | ||
62 | if (*pos == 0) | ||
63 | return SEQ_START_TOKEN; | ||
64 | |||
65 | if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / | ||
66 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) | ||
67 | return NULL; | ||
68 | |||
69 | return ring->base_addr + | ||
70 | (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); | ||
71 | } | ||
72 | |||
73 | static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos) | ||
74 | { | ||
75 | struct adf_etr_ring_data *ring = sfile->private; | ||
76 | |||
77 | if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / | ||
78 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) | ||
79 | return NULL; | ||
80 | |||
81 | return ring->base_addr + | ||
82 | (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); | ||
83 | } | ||
84 | |||
85 | static int adf_ring_show(struct seq_file *sfile, void *v) | ||
86 | { | ||
87 | struct adf_etr_ring_data *ring = sfile->private; | ||
88 | struct adf_etr_bank_data *bank = ring->bank; | ||
89 | uint32_t *msg = v; | ||
90 | void __iomem *csr = ring->bank->csr_addr; | ||
91 | int i, x; | ||
92 | |||
93 | if (v == SEQ_START_TOKEN) { | ||
94 | int head, tail, empty; | ||
95 | |||
96 | head = READ_CSR_RING_HEAD(csr, bank->bank_number, | ||
97 | ring->ring_number); | ||
98 | tail = READ_CSR_RING_TAIL(csr, bank->bank_number, | ||
99 | ring->ring_number); | ||
100 | empty = READ_CSR_E_STAT(csr, bank->bank_number); | ||
101 | |||
102 | seq_puts(sfile, "------- Ring configuration -------\n"); | ||
103 | seq_printf(sfile, "ring num %d, bank num %d\n", | ||
104 | ring->ring_number, ring->bank->bank_number); | ||
105 | seq_printf(sfile, "head %x, tail %x, empty: %d\n", | ||
106 | head, tail, (empty & 1 << ring->ring_number) | ||
107 | >> ring->ring_number); | ||
108 | seq_printf(sfile, "ring size %d, msg size %d\n", | ||
109 | ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size), | ||
110 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); | ||
111 | seq_puts(sfile, "----------- Ring data ------------\n"); | ||
112 | return 0; | ||
113 | } | ||
114 | seq_printf(sfile, "%p:", msg); | ||
115 | x = 0; | ||
116 | i = 0; | ||
117 | for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) { | ||
118 | seq_printf(sfile, " %08X", *(msg + i)); | ||
119 | if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 && | ||
120 | (++x == 8)) { | ||
121 | seq_printf(sfile, "\n%p:", msg + i + 1); | ||
122 | x = 0; | ||
123 | } | ||
124 | } | ||
125 | seq_puts(sfile, "\n"); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static void adf_ring_stop(struct seq_file *sfile, void *v) | ||
130 | { | ||
131 | mutex_unlock(&ring_read_lock); | ||
132 | } | ||
133 | |||
134 | static const struct seq_operations adf_ring_sops = { | ||
135 | .start = adf_ring_start, | ||
136 | .next = adf_ring_next, | ||
137 | .stop = adf_ring_stop, | ||
138 | .show = adf_ring_show | ||
139 | }; | ||
140 | |||
141 | static int adf_ring_open(struct inode *inode, struct file *file) | ||
142 | { | ||
143 | int ret = seq_open(file, &adf_ring_sops); | ||
144 | |||
145 | if (!ret) { | ||
146 | struct seq_file *seq_f = file->private_data; | ||
147 | |||
148 | seq_f->private = inode->i_private; | ||
149 | } | ||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | static const struct file_operations adf_ring_debug_fops = { | ||
154 | .open = adf_ring_open, | ||
155 | .read = seq_read, | ||
156 | .llseek = seq_lseek, | ||
157 | .release = seq_release | ||
158 | }; | ||
159 | |||
160 | int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) | ||
161 | { | ||
162 | struct adf_etr_ring_debug_entry *ring_debug; | ||
163 | char entry_name[8]; | ||
164 | |||
165 | ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL); | ||
166 | if (!ring_debug) | ||
167 | return -ENOMEM; | ||
168 | |||
169 | strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); | ||
170 | snprintf(entry_name, sizeof(entry_name), "ring_%02d", | ||
171 | ring->ring_number); | ||
172 | |||
173 | ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR, | ||
174 | ring->bank->bank_debug_dir, | ||
175 | ring, &adf_ring_debug_fops); | ||
176 | if (!ring_debug->debug) { | ||
177 | pr_err("QAT: Failed to create ring debug entry.\n"); | ||
178 | kfree(ring_debug); | ||
179 | return -EFAULT; | ||
180 | } | ||
181 | ring->ring_debug = ring_debug; | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring) | ||
186 | { | ||
187 | if (ring->ring_debug) { | ||
188 | debugfs_remove(ring->ring_debug->debug); | ||
189 | kfree(ring->ring_debug); | ||
190 | ring->ring_debug = NULL; | ||
191 | } | ||
192 | } | ||
193 | |||
194 | static void *adf_bank_start(struct seq_file *sfile, loff_t *pos) | ||
195 | { | ||
196 | mutex_lock(&bank_read_lock); | ||
197 | if (*pos == 0) | ||
198 | return SEQ_START_TOKEN; | ||
199 | |||
200 | if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK) | ||
201 | return NULL; | ||
202 | |||
203 | return pos; | ||
204 | } | ||
205 | |||
206 | static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos) | ||
207 | { | ||
208 | if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK) | ||
209 | return NULL; | ||
210 | |||
211 | return pos; | ||
212 | } | ||
213 | |||
214 | static int adf_bank_show(struct seq_file *sfile, void *v) | ||
215 | { | ||
216 | struct adf_etr_bank_data *bank = sfile->private; | ||
217 | |||
218 | if (v == SEQ_START_TOKEN) { | ||
219 | seq_printf(sfile, "------- Bank %d configuration -------\n", | ||
220 | bank->bank_number); | ||
221 | } else { | ||
222 | int ring_id = *((int *)v) - 1; | ||
223 | struct adf_etr_ring_data *ring = &bank->rings[ring_id]; | ||
224 | void __iomem *csr = bank->csr_addr; | ||
225 | int head, tail, empty; | ||
226 | |||
227 | if (!(bank->ring_mask & 1 << ring_id)) | ||
228 | return 0; | ||
229 | |||
230 | head = READ_CSR_RING_HEAD(csr, bank->bank_number, | ||
231 | ring->ring_number); | ||
232 | tail = READ_CSR_RING_TAIL(csr, bank->bank_number, | ||
233 | ring->ring_number); | ||
234 | empty = READ_CSR_E_STAT(csr, bank->bank_number); | ||
235 | |||
236 | seq_printf(sfile, | ||
237 | "ring num %02d, head %04x, tail %04x, empty: %d\n", | ||
238 | ring->ring_number, head, tail, | ||
239 | (empty & 1 << ring->ring_number) >> | ||
240 | ring->ring_number); | ||
241 | } | ||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static void adf_bank_stop(struct seq_file *sfile, void *v) | ||
246 | { | ||
247 | mutex_unlock(&bank_read_lock); | ||
248 | } | ||
249 | |||
250 | static const struct seq_operations adf_bank_sops = { | ||
251 | .start = adf_bank_start, | ||
252 | .next = adf_bank_next, | ||
253 | .stop = adf_bank_stop, | ||
254 | .show = adf_bank_show | ||
255 | }; | ||
256 | |||
257 | static int adf_bank_open(struct inode *inode, struct file *file) | ||
258 | { | ||
259 | int ret = seq_open(file, &adf_bank_sops); | ||
260 | |||
261 | if (!ret) { | ||
262 | struct seq_file *seq_f = file->private_data; | ||
263 | |||
264 | seq_f->private = inode->i_private; | ||
265 | } | ||
266 | return ret; | ||
267 | } | ||
268 | |||
269 | static const struct file_operations adf_bank_debug_fops = { | ||
270 | .open = adf_bank_open, | ||
271 | .read = seq_read, | ||
272 | .llseek = seq_lseek, | ||
273 | .release = seq_release | ||
274 | }; | ||
275 | |||
276 | int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) | ||
277 | { | ||
278 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
279 | struct dentry *parent = accel_dev->transport->debug; | ||
280 | char name[8]; | ||
281 | |||
282 | snprintf(name, sizeof(name), "bank_%02d", bank->bank_number); | ||
283 | bank->bank_debug_dir = debugfs_create_dir(name, parent); | ||
284 | if (!bank->bank_debug_dir) { | ||
285 | pr_err("QAT: Failed to create bank debug dir.\n"); | ||
286 | return -EFAULT; | ||
287 | } | ||
288 | |||
289 | bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR, | ||
290 | bank->bank_debug_dir, bank, | ||
291 | &adf_bank_debug_fops); | ||
292 | if (!bank->bank_debug_cfg) { | ||
293 | pr_err("QAT: Failed to create bank debug entry.\n"); | ||
294 | debugfs_remove(bank->bank_debug_dir); | ||
295 | return -EFAULT; | ||
296 | } | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank) | ||
301 | { | ||
302 | debugfs_remove(bank->bank_debug_cfg); | ||
303 | debugfs_remove(bank->bank_debug_dir); | ||
304 | } | ||
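With CONFIG_DEBUG_FS enabled, the entries created above land under the device's debugfs directory (the device directory name below is illustrative):

	/sys/kernel/debug/<qat device>/transport/
		bank_00/config		<- bank summary (adf_bank_show)
		bank_00/ring_02		<- per-ring header plus raw messages
		...

Reading a ring file prints the configuration header from adf_ring_show() followed by a hex dump of every slot, including empty slots still holding the 0x7F7F7F7F empty signature.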
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h new file mode 100644 index 000000000000..f854bac276b0 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_TRANSPORT_INTRN_H | ||
48 | #define ADF_TRANSPORT_INTRN_H | ||
49 | |||
50 | #include <linux/interrupt.h> | ||
51 | #include <linux/atomic.h> | ||
52 | #include <linux/spinlock_types.h> | ||
53 | #include "adf_transport.h" | ||
54 | |||
55 | struct adf_etr_ring_debug_entry { | ||
56 | char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
57 | struct dentry *debug; | ||
58 | }; | ||
59 | |||
60 | struct adf_etr_ring_data { | ||
61 | void *base_addr; | ||
62 | atomic_t *inflights; | ||
63 | spinlock_t lock; /* protects ring data struct */ | ||
64 | adf_callback_fn callback; | ||
65 | struct adf_etr_bank_data *bank; | ||
66 | dma_addr_t dma_addr; | ||
67 | uint16_t head; | ||
68 | uint16_t tail; | ||
69 | uint8_t ring_number; | ||
70 | uint8_t ring_size; | ||
71 | uint8_t msg_size; | ||
72 | uint8_t reserved; | ||
73 | struct adf_etr_ring_debug_entry *ring_debug; | ||
74 | } __packed; | ||
75 | |||
76 | struct adf_etr_bank_data { | ||
77 | struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; | ||
78 | struct tasklet_struct resp_handler; | ||
79 | void __iomem *csr_addr; | ||
80 | struct adf_accel_dev *accel_dev; | ||
81 | uint32_t irq_coalesc_timer; | ||
82 | uint16_t ring_mask; | ||
83 | uint16_t irq_mask; | ||
84 | spinlock_t lock; /* protects bank data struct */ | ||
85 | struct dentry *bank_debug_dir; | ||
86 | struct dentry *bank_debug_cfg; | ||
87 | uint32_t bank_number; | ||
88 | } __packed; | ||
89 | |||
90 | struct adf_etr_data { | ||
91 | struct adf_etr_bank_data *banks; | ||
92 | struct dentry *debug; | ||
93 | }; | ||
94 | |||
95 | void adf_response_handler(unsigned long bank_addr); | ||
96 | #ifdef CONFIG_DEBUG_FS | ||
97 | #include <linux/debugfs.h> | ||
98 | int adf_bank_debugfs_add(struct adf_etr_bank_data *bank); | ||
99 | void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank); | ||
100 | int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name); | ||
101 | void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring); | ||
102 | #else | ||
103 | static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) | ||
104 | { | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | #define adf_bank_debugfs_rm(bank) do {} while (0) | ||
109 | |||
110 | static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, | ||
111 | const char *name) | ||
112 | { | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | #define adf_ring_debugfs_rm(ring) do {} while (0) | ||
117 | #endif | ||
118 | #endif | ||
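
Given the resp_handler tasklet and the adf_response_handler(unsigned long) prototype above, the expected wiring is the usual tasklet data-word round trip. A sketch, assuming the bank pointer is what gets packed into the unsigned long (bank_tasklet_setup_sketch is a hypothetical name):

	#include <linux/interrupt.h>
	#include "adf_transport_internal.h"

	/* Assumed wiring: pack the bank pointer into the tasklet's data
	 * word at init time; adf_response_handler() casts it back out. */
	static void bank_tasklet_setup_sketch(struct adf_etr_bank_data *bank)
	{
		tasklet_init(&bank->resp_handler, adf_response_handler,
			     (unsigned long)bank);
	}
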
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h new file mode 100644 index 000000000000..f1e30e24a419 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _ICP_QAT_FW_H_ | ||
48 | #define _ICP_QAT_FW_H_ | ||
49 | #include <linux/types.h> | ||
50 | #include "icp_qat_hw.h" | ||
51 | |||
52 | #define QAT_FIELD_SET(flags, val, bitpos, mask) \ | ||
53 | { (flags) = (((flags) & (~((mask) << (bitpos)))) | \ | ||
54 | (((val) & (mask)) << (bitpos))); } | ||
55 | |||
56 | #define QAT_FIELD_GET(flags, bitpos, mask) \ | ||
57 | (((flags) >> (bitpos)) & (mask)) | ||
58 | |||
59 | #define ICP_QAT_FW_REQ_DEFAULT_SZ 128 | ||
60 | #define ICP_QAT_FW_RESP_DEFAULT_SZ 32 | ||
61 | #define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 | ||
62 | #define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF | ||
63 | #define ICP_QAT_FW_NUM_LONGWORDS_1 1 | ||
64 | #define ICP_QAT_FW_NUM_LONGWORDS_2 2 | ||
65 | #define ICP_QAT_FW_NUM_LONGWORDS_3 3 | ||
66 | #define ICP_QAT_FW_NUM_LONGWORDS_4 4 | ||
67 | #define ICP_QAT_FW_NUM_LONGWORDS_5 5 | ||
68 | #define ICP_QAT_FW_NUM_LONGWORDS_6 6 | ||
69 | #define ICP_QAT_FW_NUM_LONGWORDS_7 7 | ||
70 | #define ICP_QAT_FW_NUM_LONGWORDS_10 10 | ||
71 | #define ICP_QAT_FW_NUM_LONGWORDS_13 13 | ||
72 | #define ICP_QAT_FW_NULL_REQ_SERV_ID 1 | ||
73 | |||
74 | enum icp_qat_fw_comn_resp_serv_id { | ||
75 | ICP_QAT_FW_COMN_RESP_SERV_NULL, | ||
76 | ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, | ||
77 | ICP_QAT_FW_COMN_RESP_SERV_DELIMITER | ||
78 | }; | ||
79 | |||
80 | enum icp_qat_fw_comn_request_id { | ||
81 | ICP_QAT_FW_COMN_REQ_NULL = 0, | ||
82 | ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, | ||
83 | ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, | ||
84 | ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, | ||
85 | ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, | ||
86 | ICP_QAT_FW_COMN_REQ_DELIMITER | ||
87 | }; | ||
88 | |||
89 | struct icp_qat_fw_comn_req_hdr_cd_pars { | ||
90 | union { | ||
91 | struct { | ||
92 | uint64_t content_desc_addr; | ||
93 | uint16_t content_desc_resrvd1; | ||
94 | uint8_t content_desc_params_sz; | ||
95 | uint8_t content_desc_hdr_resrvd2; | ||
96 | uint32_t content_desc_resrvd3; | ||
97 | } s; | ||
98 | struct { | ||
99 | uint32_t serv_specif_fields[4]; | ||
100 | } s1; | ||
101 | } u; | ||
102 | }; | ||
103 | |||
104 | struct icp_qat_fw_comn_req_mid { | ||
105 | uint64_t opaque_data; | ||
106 | uint64_t src_data_addr; | ||
107 | uint64_t dest_data_addr; | ||
108 | uint32_t src_length; | ||
109 | uint32_t dst_length; | ||
110 | }; | ||
111 | |||
112 | struct icp_qat_fw_comn_req_cd_ctrl { | ||
113 | uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; | ||
114 | }; | ||
115 | |||
116 | struct icp_qat_fw_comn_req_hdr { | ||
117 | uint8_t resrvd1; | ||
118 | uint8_t service_cmd_id; | ||
119 | uint8_t service_type; | ||
120 | uint8_t hdr_flags; | ||
121 | uint16_t serv_specif_flags; | ||
122 | uint16_t comn_req_flags; | ||
123 | }; | ||
124 | |||
125 | struct icp_qat_fw_comn_req_rqpars { | ||
126 | uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; | ||
127 | }; | ||
128 | |||
129 | struct icp_qat_fw_comn_req { | ||
130 | struct icp_qat_fw_comn_req_hdr comn_hdr; | ||
131 | struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; | ||
132 | struct icp_qat_fw_comn_req_mid comn_mid; | ||
133 | struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; | ||
134 | struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; | ||
135 | }; | ||
136 | |||
137 | struct icp_qat_fw_comn_error { | ||
138 | uint8_t xlat_err_code; | ||
139 | uint8_t cmp_err_code; | ||
140 | }; | ||
141 | |||
142 | struct icp_qat_fw_comn_resp_hdr { | ||
143 | uint8_t resrvd1; | ||
144 | uint8_t service_id; | ||
145 | uint8_t response_type; | ||
146 | uint8_t hdr_flags; | ||
147 | struct icp_qat_fw_comn_error comn_error; | ||
148 | uint8_t comn_status; | ||
149 | uint8_t cmd_id; | ||
150 | }; | ||
151 | |||
152 | struct icp_qat_fw_comn_resp { | ||
153 | struct icp_qat_fw_comn_resp_hdr comn_hdr; | ||
154 | uint64_t opaque_data; | ||
155 | uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
156 | }; | ||
157 | |||
158 | #define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 | ||
159 | #define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 | ||
160 | #define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 | ||
161 | #define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 | ||
162 | #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F | ||
163 | |||
164 | #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ | ||
165 | icp_qat_fw_comn_req_hdr_t.service_type | ||
166 | |||
167 | #define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ | ||
168 | icp_qat_fw_comn_req_hdr_t.service_type = val | ||
169 | |||
170 | #define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ | ||
171 | icp_qat_fw_comn_req_hdr_t.service_cmd_id | ||
172 | |||
173 | #define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ | ||
174 | icp_qat_fw_comn_req_hdr_t.service_cmd_id = val | ||
175 | |||
176 | #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ | ||
177 | ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) | ||
178 | |||
179 | #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ | ||
180 | ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) | ||
181 | |||
182 | #define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ | ||
183 | QAT_FIELD_GET(hdr_flags, \ | ||
184 | ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ | ||
185 | ICP_QAT_FW_COMN_VALID_FLAG_MASK) | ||
186 | |||
187 | #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ | ||
188 | (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) | ||
189 | |||
190 | #define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ | ||
191 | QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ | ||
192 | ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ | ||
193 | ICP_QAT_FW_COMN_VALID_FLAG_MASK) | ||
194 | |||
195 | #define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ | ||
196 | (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ | ||
197 | ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | ||
198 | |||
199 | #define QAT_COMN_PTR_TYPE_BITPOS 0 | ||
200 | #define QAT_COMN_PTR_TYPE_MASK 0x1 | ||
201 | #define QAT_COMN_CD_FLD_TYPE_BITPOS 1 | ||
202 | #define QAT_COMN_CD_FLD_TYPE_MASK 0x1 | ||
203 | #define QAT_COMN_PTR_TYPE_FLAT 0x0 | ||
204 | #define QAT_COMN_PTR_TYPE_SGL 0x1 | ||
205 | #define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 | ||
206 | #define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 | ||
207 | |||
208 | #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ | ||
209 | ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ | ||
210 | | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) | ||
211 | |||
212 | #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ | ||
213 | QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) | ||
214 | |||
215 | #define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ | ||
216 | QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ | ||
217 | QAT_COMN_CD_FLD_TYPE_MASK) | ||
218 | |||
219 | #define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ | ||
220 | QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ | ||
221 | QAT_COMN_PTR_TYPE_MASK) | ||
222 | |||
223 | #define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ | ||
224 | QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ | ||
225 | QAT_COMN_CD_FLD_TYPE_MASK) | ||
226 | |||
227 | #define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 | ||
228 | #define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 | ||
229 | #define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 | ||
230 | #define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F | ||
231 | |||
232 | #define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ | ||
233 | ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ | ||
234 | >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) | ||
235 | |||
236 | #define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ | ||
237 | { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ | ||
238 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ | ||
239 | ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ | ||
240 | & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } | ||
241 | |||
242 | #define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ | ||
243 | (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | ||
244 | |||
245 | #define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ | ||
246 | { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ | ||
247 | & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ | ||
248 | ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } | ||
249 | |||
250 | #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 | ||
251 | #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 | ||
252 | #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 | ||
253 | #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 | ||
254 | #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 | ||
255 | #define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 | ||
256 | #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 | ||
257 | #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 | ||
258 | |||
259 | #define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \ | ||
260 | ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \ | ||
261 | QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ | ||
262 | (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \ | ||
263 | QAT_COMN_RESP_CMP_STATUS_BITPOS) | \ | ||
264 | (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \ | ||
265 | QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \ | ||
266 | (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \ | ||
267 | QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS)) | ||
268 | |||
269 | #define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ | ||
270 | QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ | ||
271 | QAT_COMN_RESP_CRYPTO_STATUS_MASK) | ||
272 | |||
273 | #define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ | ||
274 | QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ | ||
275 | QAT_COMN_RESP_CMP_STATUS_MASK) | ||
276 | |||
277 | #define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ | ||
278 | QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ | ||
279 | QAT_COMN_RESP_XLAT_STATUS_MASK) | ||
280 | |||
281 | #define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ | ||
282 | QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ | ||
283 | QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) | ||
284 | |||
285 | #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 | ||
286 | #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 | ||
287 | #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 | ||
288 | #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 | ||
289 | #define ERR_CODE_NO_ERROR 0 | ||
290 | #define ERR_CODE_INVALID_BLOCK_TYPE -1 | ||
291 | #define ERR_CODE_NO_MATCH_ONES_COMP -2 | ||
292 | #define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 | ||
293 | #define ERR_CODE_INCOMPLETE_LEN -4 | ||
294 | #define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 | ||
295 | #define ERR_CODE_RPT_GT_SPEC_LEN -6 | ||
296 | #define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 | ||
297 | #define ERR_CODE_INV_DIS_CODE_LEN -8 | ||
298 | #define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 | ||
299 | #define ERR_CODE_DIS_TOO_FAR_BACK -10 | ||
300 | #define ERR_CODE_OVERFLOW_ERROR -11 | ||
301 | #define ERR_CODE_SOFT_ERROR -12 | ||
302 | #define ERR_CODE_FATAL_ERROR -13 | ||
303 | #define ERR_CODE_SSM_ERROR -14 | ||
304 | #define ERR_CODE_ENDPOINT_ERROR -15 | ||
305 | |||
306 | enum icp_qat_fw_slice { | ||
307 | ICP_QAT_FW_SLICE_NULL = 0, | ||
308 | ICP_QAT_FW_SLICE_CIPHER = 1, | ||
309 | ICP_QAT_FW_SLICE_AUTH = 2, | ||
310 | ICP_QAT_FW_SLICE_DRAM_RD = 3, | ||
311 | ICP_QAT_FW_SLICE_DRAM_WR = 4, | ||
312 | ICP_QAT_FW_SLICE_COMP = 5, | ||
313 | ICP_QAT_FW_SLICE_XLAT = 6, | ||
314 | ICP_QAT_FW_SLICE_DELIMITER | ||
315 | }; | ||
316 | #endif | ||
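
The field macros above are plain shift-and-mask helpers. A short, hypothetical demo showing that the BUILD, GET and SET forms agree on the valid-flag bit (bit 7) of hdr_flags; qat_fw_hdr_flags_demo is not part of the patch:

	#include "icp_qat_fw.h"

	/* Hypothetical demo: set the valid bit via BUILD, read it back,
	 * then clear it with the SET accessor (only bit 7 changes). */
	static void qat_fw_hdr_flags_demo(void)
	{
		struct icp_qat_fw_comn_req_hdr hdr = { 0 };

		hdr.hdr_flags =
		    ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

		if (ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr))
			ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr,
					ICP_QAT_FW_COMN_REQ_FLAG_CLR);
	}
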
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h new file mode 100644 index 000000000000..72a59faa9005 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _ICP_QAT_FW_INIT_ADMIN_H_ | ||
48 | #define _ICP_QAT_FW_INIT_ADMIN_H_ | ||
49 | |||
50 | #include "icp_qat_fw.h" | ||
51 | |||
52 | enum icp_qat_fw_init_admin_cmd_id { | ||
53 | ICP_QAT_FW_INIT_ME = 0, | ||
54 | ICP_QAT_FW_TRNG_ENABLE = 1, | ||
55 | ICP_QAT_FW_TRNG_DISABLE = 2, | ||
56 | ICP_QAT_FW_CONSTANTS_CFG = 3, | ||
57 | ICP_QAT_FW_STATUS_GET = 4, | ||
58 | ICP_QAT_FW_COUNTERS_GET = 5, | ||
59 | ICP_QAT_FW_LOOPBACK = 6, | ||
60 | ICP_QAT_FW_HEARTBEAT_SYNC = 7, | ||
61 | ICP_QAT_FW_HEARTBEAT_GET = 8 | ||
62 | }; | ||
63 | |||
64 | enum icp_qat_fw_init_admin_resp_status { | ||
65 | ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0, | ||
66 | ICP_QAT_FW_INIT_RESP_STATUS_FAIL | ||
67 | }; | ||
68 | |||
69 | struct icp_qat_fw_init_admin_req { | ||
70 | uint16_t init_cfg_sz; | ||
71 | uint8_t resrvd1; | ||
72 | uint8_t init_admin_cmd_id; | ||
73 | uint32_t resrvd2; | ||
74 | uint64_t opaque_data; | ||
75 | uint64_t init_cfg_ptr; | ||
76 | uint64_t resrvd3; | ||
77 | }; | ||
78 | |||
79 | struct icp_qat_fw_init_admin_resp_hdr { | ||
80 | uint8_t flags; | ||
81 | uint8_t resrvd1; | ||
82 | uint8_t status; | ||
83 | uint8_t init_admin_cmd_id; | ||
84 | }; | ||
85 | |||
86 | struct icp_qat_fw_init_admin_resp_pars { | ||
87 | union { | ||
88 | uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
89 | struct { | ||
90 | uint32_t version_patch_num; | ||
91 | uint8_t context_id; | ||
92 | uint8_t ae_id; | ||
93 | uint16_t resrvd1; | ||
94 | uint64_t resrvd2; | ||
95 | } s1; | ||
96 | struct { | ||
97 | uint64_t req_rec_count; | ||
98 | uint64_t resp_sent_count; | ||
99 | } s2; | ||
100 | } u; | ||
101 | }; | ||
102 | |||
103 | struct icp_qat_fw_init_admin_resp { | ||
104 | struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr; | ||
105 | union { | ||
106 | uint32_t resrvd2; | ||
107 | struct { | ||
108 | uint16_t version_minor_num; | ||
109 | uint16_t version_major_num; | ||
110 | } s; | ||
111 | } u; | ||
112 | uint64_t opaque_data; | ||
113 | struct icp_qat_fw_init_admin_resp_pars init_resp_pars; | ||
114 | }; | ||
115 | |||
116 | #define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 | ||
117 | #define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1 | ||
118 | #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0 | ||
119 | #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1 | ||
120 | #define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE | ||
121 | #define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \ | ||
122 | ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags) | ||
123 | |||
124 | #define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \ | ||
125 | ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val) | ||
126 | |||
127 | #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \ | ||
128 | QAT_FIELD_GET(flags, \ | ||
129 | ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \ | ||
130 | ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK) | ||
131 | #endif | ||
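
A hypothetical sketch of filling one of these admin messages -- here a STATUS_GET request whose opaque_data cookie comes back in the matching response field. qat_status_req_sketch is an illustrative name only:

	#include <linux/string.h>
	#include "icp_qat_fw_init_admin.h"

	/* Hypothetical: zero the message, pick the command, stash a cookie. */
	static void qat_status_req_sketch(struct icp_qat_fw_init_admin_req *req,
					  uint64_t cookie)
	{
		memset(req, 0, sizeof(*req));
		req->init_admin_cmd_id = ICP_QAT_FW_STATUS_GET;
		req->opaque_data = cookie;
	}

On the response side, success is resp->init_resp_hdr.status == ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS; the layout suggests the firmware's major/minor version numbers land in resp->u.s for this command.
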
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h new file mode 100644 index 000000000000..c8d26697e8ea --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h | |||
@@ -0,0 +1,404 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _ICP_QAT_FW_LA_H_ | ||
48 | #define _ICP_QAT_FW_LA_H_ | ||
49 | #include "icp_qat_fw.h" | ||
50 | |||
51 | enum icp_qat_fw_la_cmd_id { | ||
52 | ICP_QAT_FW_LA_CMD_CIPHER = 0, | ||
53 | ICP_QAT_FW_LA_CMD_AUTH = 1, | ||
54 | ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, | ||
55 | ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, | ||
56 | ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, | ||
57 | ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, | ||
58 | ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, | ||
59 | ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, | ||
60 | ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, | ||
61 | ICP_QAT_FW_LA_CMD_MGF1 = 9, | ||
62 | ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, | ||
63 | ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, | ||
64 | ICP_QAT_FW_LA_CMD_DELIMITER = 12 | ||
65 | }; | ||
66 | |||
67 | #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK | ||
68 | #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR | ||
69 | #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK | ||
70 | #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR | ||
71 | |||
72 | struct icp_qat_fw_la_bulk_req { | ||
73 | struct icp_qat_fw_comn_req_hdr comn_hdr; | ||
74 | struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; | ||
75 | struct icp_qat_fw_comn_req_mid comn_mid; | ||
76 | struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; | ||
77 | struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; | ||
78 | }; | ||
79 | |||
80 | #define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 | ||
81 | #define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 | ||
82 | #define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 | ||
83 | #define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 | ||
84 | #define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 | ||
85 | #define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 | ||
86 | #define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 | ||
87 | #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 | ||
88 | #define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 | ||
89 | #define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 | ||
90 | #define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 | ||
91 | #define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 | ||
92 | #define ICP_QAT_FW_LA_GCM_PROTO 2 | ||
93 | #define ICP_QAT_FW_LA_CCM_PROTO 1 | ||
94 | #define ICP_QAT_FW_LA_NO_PROTO 0 | ||
95 | #define QAT_LA_PROTO_BITPOS 7 | ||
96 | #define QAT_LA_PROTO_MASK 0x7 | ||
97 | #define ICP_QAT_FW_LA_CMP_AUTH_RES 1 | ||
98 | #define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 | ||
99 | #define QAT_LA_CMP_AUTH_RES_BITPOS 6 | ||
100 | #define QAT_LA_CMP_AUTH_RES_MASK 0x1 | ||
101 | #define ICP_QAT_FW_LA_RET_AUTH_RES 1 | ||
102 | #define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 | ||
103 | #define QAT_LA_RET_AUTH_RES_BITPOS 5 | ||
104 | #define QAT_LA_RET_AUTH_RES_MASK 0x1 | ||
105 | #define ICP_QAT_FW_LA_UPDATE_STATE 1 | ||
106 | #define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 | ||
107 | #define QAT_LA_UPDATE_STATE_BITPOS 4 | ||
108 | #define QAT_LA_UPDATE_STATE_MASK 0x1 | ||
109 | #define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 | ||
110 | #define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 | ||
111 | #define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 | ||
112 | #define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 | ||
113 | #define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 | ||
114 | #define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 | ||
115 | #define QAT_LA_CIPH_IV_FLD_BITPOS 2 | ||
116 | #define QAT_LA_CIPH_IV_FLD_MASK 0x1 | ||
117 | #define ICP_QAT_FW_LA_PARTIAL_NONE 0 | ||
118 | #define ICP_QAT_FW_LA_PARTIAL_START 1 | ||
119 | #define ICP_QAT_FW_LA_PARTIAL_MID 3 | ||
120 | #define ICP_QAT_FW_LA_PARTIAL_END 2 | ||
121 | #define QAT_LA_PARTIAL_BITPOS 0 | ||
122 | #define QAT_LA_PARTIAL_MASK 0x3 | ||
123 | #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ | ||
124 | cmp_auth, ret_auth, update_state, \ | ||
125 | ciph_iv, ciphcfg, partial) \ | ||
126 | (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ | ||
127 | QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ | ||
128 | ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ | ||
129 | QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ | ||
130 | ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ | ||
131 | QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ | ||
132 | ((proto & QAT_LA_PROTO_MASK) << \ | ||
133 | QAT_LA_PROTO_BITPOS) | \ | ||
134 | ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ | ||
135 | QAT_LA_CMP_AUTH_RES_BITPOS) | \ | ||
136 | ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ | ||
137 | QAT_LA_RET_AUTH_RES_BITPOS) | \ | ||
138 | ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ | ||
139 | QAT_LA_UPDATE_STATE_BITPOS) | \ | ||
140 | ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ | ||
141 | QAT_LA_CIPH_IV_FLD_BITPOS) | \ | ||
142 | ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ | ||
143 | QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ | ||
144 | ((partial & QAT_LA_PARTIAL_MASK) << \ | ||
145 | QAT_LA_PARTIAL_BITPOS)) | ||
146 | |||
147 | #define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ | ||
148 | QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ | ||
149 | QAT_LA_CIPH_IV_FLD_MASK) | ||
150 | |||
151 | #define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ | ||
152 | QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ | ||
153 | QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) | ||
154 | |||
155 | #define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ | ||
156 | QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ | ||
157 | QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) | ||
158 | |||
159 | #define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ | ||
160 | QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ | ||
161 | QAT_LA_GCM_IV_LEN_FLAG_MASK) | ||
162 | |||
163 | #define ICP_QAT_FW_LA_PROTO_GET(flags) \ | ||
164 | QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) | ||
165 | |||
166 | #define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ | ||
167 | QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ | ||
168 | QAT_LA_CMP_AUTH_RES_MASK) | ||
169 | |||
170 | #define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ | ||
171 | QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ | ||
172 | QAT_LA_RET_AUTH_RES_MASK) | ||
173 | |||
174 | #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ | ||
175 | QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ | ||
176 | QAT_LA_DIGEST_IN_BUFFER_MASK) | ||
177 | |||
178 | #define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ | ||
179 | QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ | ||
180 | QAT_LA_UPDATE_STATE_MASK) | ||
181 | |||
182 | #define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ | ||
183 | QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ | ||
184 | QAT_LA_PARTIAL_MASK) | ||
185 | |||
186 | #define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ | ||
187 | QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ | ||
188 | QAT_LA_CIPH_IV_FLD_MASK) | ||
189 | |||
190 | #define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ | ||
191 | QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ | ||
192 | QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) | ||
193 | |||
194 | #define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ | ||
195 | QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ | ||
196 | QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) | ||
197 | |||
198 | #define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ | ||
199 | QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ | ||
200 | QAT_LA_GCM_IV_LEN_FLAG_MASK) | ||
201 | |||
202 | #define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ | ||
203 | QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ | ||
204 | QAT_LA_PROTO_MASK) | ||
205 | |||
206 | #define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ | ||
207 | QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ | ||
208 | QAT_LA_CMP_AUTH_RES_MASK) | ||
209 | |||
210 | #define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ | ||
211 | QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ | ||
212 | QAT_LA_RET_AUTH_RES_MASK) | ||
213 | |||
214 | #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ | ||
215 | QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ | ||
216 | QAT_LA_DIGEST_IN_BUFFER_MASK) | ||
217 | |||
218 | #define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ | ||
219 | QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ | ||
220 | QAT_LA_UPDATE_STATE_MASK) | ||
221 | |||
222 | #define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ | ||
223 | QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ | ||
224 | QAT_LA_PARTIAL_MASK) | ||
225 | |||
226 | struct icp_qat_fw_cipher_req_hdr_cd_pars { | ||
227 | union { | ||
228 | struct { | ||
229 | uint64_t content_desc_addr; | ||
230 | uint16_t content_desc_resrvd1; | ||
231 | uint8_t content_desc_params_sz; | ||
232 | uint8_t content_desc_hdr_resrvd2; | ||
233 | uint32_t content_desc_resrvd3; | ||
234 | } s; | ||
235 | struct { | ||
236 | uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
237 | } s1; | ||
238 | } u; | ||
239 | }; | ||
240 | |||
241 | struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { | ||
242 | union { | ||
243 | struct { | ||
244 | uint64_t content_desc_addr; | ||
245 | uint16_t content_desc_resrvd1; | ||
246 | uint8_t content_desc_params_sz; | ||
247 | uint8_t content_desc_hdr_resrvd2; | ||
248 | uint32_t content_desc_resrvd3; | ||
249 | } s; | ||
250 | struct { | ||
251 | uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
252 | } s1; | ||
253 | } u; | ||
254 | }; | ||
255 | |||
256 | struct icp_qat_fw_cipher_cd_ctrl_hdr { | ||
257 | uint8_t cipher_state_sz; | ||
258 | uint8_t cipher_key_sz; | ||
259 | uint8_t cipher_cfg_offset; | ||
260 | uint8_t next_curr_id; | ||
261 | uint8_t cipher_padding_sz; | ||
262 | uint8_t resrvd1; | ||
263 | uint16_t resrvd2; | ||
264 | uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; | ||
265 | }; | ||
266 | |||
267 | struct icp_qat_fw_auth_cd_ctrl_hdr { | ||
268 | uint32_t resrvd1; | ||
269 | uint8_t resrvd2; | ||
270 | uint8_t hash_flags; | ||
271 | uint8_t hash_cfg_offset; | ||
272 | uint8_t next_curr_id; | ||
273 | uint8_t resrvd3; | ||
274 | uint8_t outer_prefix_sz; | ||
275 | uint8_t final_sz; | ||
276 | uint8_t inner_res_sz; | ||
277 | uint8_t resrvd4; | ||
278 | uint8_t inner_state1_sz; | ||
279 | uint8_t inner_state2_offset; | ||
280 | uint8_t inner_state2_sz; | ||
281 | uint8_t outer_config_offset; | ||
282 | uint8_t outer_state1_sz; | ||
283 | uint8_t outer_res_sz; | ||
284 | uint8_t outer_prefix_offset; | ||
285 | }; | ||
286 | |||
287 | struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { | ||
288 | uint8_t cipher_state_sz; | ||
289 | uint8_t cipher_key_sz; | ||
290 | uint8_t cipher_cfg_offset; | ||
291 | uint8_t next_curr_id_cipher; | ||
292 | uint8_t cipher_padding_sz; | ||
293 | uint8_t hash_flags; | ||
294 | uint8_t hash_cfg_offset; | ||
295 | uint8_t next_curr_id_auth; | ||
296 | uint8_t resrvd1; | ||
297 | uint8_t outer_prefix_sz; | ||
298 | uint8_t final_sz; | ||
299 | uint8_t inner_res_sz; | ||
300 | uint8_t resrvd2; | ||
301 | uint8_t inner_state1_sz; | ||
302 | uint8_t inner_state2_offset; | ||
303 | uint8_t inner_state2_sz; | ||
304 | uint8_t outer_config_offset; | ||
305 | uint8_t outer_state1_sz; | ||
306 | uint8_t outer_res_sz; | ||
307 | uint8_t outer_prefix_offset; | ||
308 | }; | ||
309 | |||
310 | #define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 | ||
311 | #define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 | ||
312 | #define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 | ||
313 | #define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ | ||
314 | (sizeof(struct icp_qat_fw_la_cipher_req_params)) | ||
315 | #define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) | ||
316 | |||
317 | struct icp_qat_fw_la_cipher_req_params { | ||
318 | uint32_t cipher_offset; | ||
319 | uint32_t cipher_length; | ||
320 | union { | ||
321 | uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
322 | struct { | ||
323 | uint64_t cipher_IV_ptr; | ||
324 | uint64_t resrvd1; | ||
325 | } s; | ||
326 | } u; | ||
327 | }; | ||
328 | |||
329 | struct icp_qat_fw_la_auth_req_params { | ||
330 | uint32_t auth_off; | ||
331 | uint32_t auth_len; | ||
332 | union { | ||
333 | uint64_t auth_partial_st_prefix; | ||
334 | uint64_t aad_adr; | ||
335 | } u1; | ||
336 | uint64_t auth_res_addr; | ||
337 | union { | ||
338 | uint8_t inner_prefix_sz; | ||
339 | uint8_t aad_sz; | ||
340 | } u2; | ||
341 | uint8_t resrvd1; | ||
342 | uint8_t hash_state_sz; | ||
343 | uint8_t auth_res_sz; | ||
344 | } __packed; | ||
345 | |||
346 | struct icp_qat_fw_la_auth_req_params_resrvd_flds { | ||
347 | uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; | ||
348 | union { | ||
349 | uint8_t inner_prefix_sz; | ||
350 | uint8_t aad_sz; | ||
351 | } u2; | ||
352 | uint8_t resrvd1; | ||
353 | uint16_t resrvd2; | ||
354 | }; | ||
355 | |||
356 | struct icp_qat_fw_la_resp { | ||
357 | struct icp_qat_fw_comn_resp_hdr comn_resp; | ||
358 | uint64_t opaque_data; | ||
359 | uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; | ||
360 | }; | ||
361 | |||
362 | #define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ | ||
363 | ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ | ||
364 | ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) | ||
365 | |||
366 | #define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ | ||
367 | { (cd_ctrl_hdr_t)->next_curr_id_cipher = \ | ||
368 | ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ | ||
369 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ | ||
370 | ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ | ||
371 | & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } | ||
372 | |||
373 | #define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ | ||
374 | (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ | ||
375 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | ||
376 | |||
377 | #define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ | ||
378 | { (cd_ctrl_hdr_t)->next_curr_id_cipher = \ | ||
379 | ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ | ||
380 | & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ | ||
381 | ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } | ||
382 | |||
383 | #define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ | ||
384 | ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ | ||
385 | >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) | ||
386 | |||
387 | #define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ | ||
388 | { (cd_ctrl_hdr_t)->next_curr_id_auth = \ | ||
389 | ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ | ||
390 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ | ||
391 | ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ | ||
392 | & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } | ||
393 | |||
394 | #define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ | ||
395 | (((cd_ctrl_hdr_t)->next_curr_id_auth) \ | ||
396 | & ICP_QAT_FW_COMN_CURR_ID_MASK) | ||
397 | |||
398 | #define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ | ||
399 | { (cd_ctrl_hdr_t)->next_curr_id_auth = \ | ||
400 | ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ | ||
401 | & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ | ||
402 | ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } | ||
403 | |||
404 | #endif | ||
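
As a worked example of ICP_QAT_FW_LA_FLAGS_BUILD's ten positional arguments, a hypothetical flag word for a cipher+hash request: no 3GPP protocol, IV passed as a 64-bit pointer, cipher/auth config in the content descriptor, hardware digest compare, full (non-partial) packet. qat_la_flags_sketch is an illustrative name only:

	#include "icp_qat_fw_la.h"

	/* Hypothetical: all ten BUILD arguments spelled out in order
	 * (zuc_proto, gcm_iv_len, digest_in_buf, proto, cmp_auth,
	 *  ret_auth, update_state, ciph_iv, ciph_cfg, partial). */
	static uint16_t qat_la_flags_sketch(void)
	{
		return ICP_QAT_FW_LA_FLAGS_BUILD(0,
				ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS,
				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER,
				ICP_QAT_FW_LA_NO_PROTO,
				ICP_QAT_FW_LA_CMP_AUTH_RES,
				ICP_QAT_FW_LA_NO_RET_AUTH_RES,
				ICP_QAT_FW_LA_NO_UPDATE_STATE,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR,
				ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP,
				ICP_QAT_FW_LA_PARTIAL_NONE);
	}

Note that the macro's third parameter is named auth_rslt but is shifted into the DIGEST_IN_BUFFER bit position, so the digest-in-buffer constants are the right values to pass there.
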
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h new file mode 100644 index 000000000000..5e1aa40c0404 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef __ICP_QAT_FW_LOADER_HANDLE_H__ | ||
48 | #define __ICP_QAT_FW_LOADER_HANDLE_H__ | ||
49 | #include "icp_qat_uclo.h" | ||
50 | |||
51 | struct icp_qat_fw_loader_ae_data { | ||
52 | unsigned int state; | ||
53 | unsigned int ustore_size; | ||
54 | unsigned int free_addr; | ||
55 | unsigned int free_size; | ||
56 | unsigned int live_ctx_mask; | ||
57 | }; | ||
58 | |||
59 | struct icp_qat_fw_loader_hal_handle { | ||
60 | struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE]; | ||
61 | unsigned int ae_mask; | ||
62 | unsigned int slice_mask; | ||
63 | unsigned int revision_id; | ||
64 | unsigned int ae_max_num; | ||
65 | unsigned int upc_mask; | ||
66 | unsigned int max_ustore; | ||
67 | }; | ||
68 | |||
69 | struct icp_qat_fw_loader_handle { | ||
70 | struct icp_qat_fw_loader_hal_handle *hal_handle; | ||
71 | void *obj_handle; | ||
72 | void __iomem *hal_sram_addr_v; | ||
73 | void __iomem *hal_cap_g_ctl_csr_addr_v; | ||
74 | void __iomem *hal_cap_ae_xfer_csr_addr_v; | ||
75 | void __iomem *hal_cap_ae_local_csr_addr_v; | ||
76 | void __iomem *hal_ep_csr_addr_v; | ||
77 | }; | ||
78 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h new file mode 100644 index 000000000000..85b6d241ea82 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef __ICP_QAT_HAL_H | ||
48 | #define __ICP_QAT_HAL_H | ||
49 | #include "icp_qat_fw_loader_handle.h" | ||
50 | |||
51 | enum hal_global_csr { | ||
52 | MISC_CONTROL = 0x04, | ||
53 | ICP_RESET = 0x0c, | ||
54 | ICP_GLOBAL_CLK_ENABLE = 0x50 | ||
55 | }; | ||
56 | |||
57 | enum hal_ae_csr { | ||
58 | USTORE_ADDRESS = 0x000, | ||
59 | USTORE_DATA_LOWER = 0x004, | ||
60 | USTORE_DATA_UPPER = 0x008, | ||
61 | ALU_OUT = 0x010, | ||
62 | CTX_ARB_CNTL = 0x014, | ||
63 | CTX_ENABLES = 0x018, | ||
64 | CC_ENABLE = 0x01c, | ||
65 | CSR_CTX_POINTER = 0x020, | ||
66 | CTX_STS_INDIRECT = 0x040, | ||
67 | ACTIVE_CTX_STATUS = 0x044, | ||
68 | CTX_SIG_EVENTS_INDIRECT = 0x048, | ||
69 | CTX_SIG_EVENTS_ACTIVE = 0x04c, | ||
70 | CTX_WAKEUP_EVENTS_INDIRECT = 0x050, | ||
71 | LM_ADDR_0_INDIRECT = 0x060, | ||
72 | LM_ADDR_1_INDIRECT = 0x068, | ||
73 | INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0, | ||
74 | INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8, | ||
75 | FUTURE_COUNT_SIGNAL_INDIRECT = 0x078, | ||
76 | TIMESTAMP_LOW = 0x0c0, | ||
77 | TIMESTAMP_HIGH = 0x0c4, | ||
78 | PROFILE_COUNT = 0x144, | ||
79 | SIGNATURE_ENABLE = 0x150, | ||
80 | AE_MISC_CONTROL = 0x160, | ||
81 | LOCAL_CSR_STATUS = 0x180, | ||
82 | }; | ||
83 | |||
84 | #define UA_ECS (0x1U << 31) | ||
85 | #define ACS_ABO_BITPOS 31 | ||
86 | #define ACS_ACNO 0x7 | ||
87 | #define CE_ENABLE_BITPOS 0x8 | ||
88 | #define CE_LMADDR_0_GLOBAL_BITPOS 16 | ||
89 | #define CE_LMADDR_1_GLOBAL_BITPOS 17 | ||
90 | #define CE_NN_MODE_BITPOS 20 | ||
91 | #define CE_REG_PAR_ERR_BITPOS 25 | ||
92 | #define CE_BREAKPOINT_BITPOS 27 | ||
93 | #define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29 | ||
94 | #define CE_INUSE_CONTEXTS_BITPOS 31 | ||
95 | #define CE_NN_MODE (0x1 << CE_NN_MODE_BITPOS) | ||
96 | #define CE_INUSE_CONTEXTS (0x1U << CE_INUSE_CONTEXTS_BITPOS) | ||
97 | #define XCWE_VOLUNTARY (0x1) | ||
98 | #define LCS_STATUS (0x1) | ||
99 | #define MMC_SHARE_CS_BITPOS 2 | ||
100 | #define GLOBAL_CSR 0xA00 | ||
101 | |||
102 | #define SET_CAP_CSR(handle, csr, val) \ | ||
103 | ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) | ||
104 | #define GET_CAP_CSR(handle, csr) \ | ||
105 | ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr) | ||
106 | #define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) | ||
107 | #define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) | ||
108 | #define AE_CSR(handle, ae) \ | ||
109 | (handle->hal_cap_ae_local_csr_addr_v + \ | ||
110 | ((ae & handle->hal_handle->ae_mask) << 12)) | ||
111 | #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) | ||
112 | #define SET_AE_CSR(handle, ae, csr, val) \ | ||
113 | ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) | ||
114 | #define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) | ||
115 | #define AE_XFER(handle, ae) \ | ||
116 | (handle->hal_cap_ae_xfer_csr_addr_v + \ | ||
117 | ((ae & handle->hal_handle->ae_mask) << 12)) | ||
118 | #define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ | ||
119 | ((reg & 0xff) << 2)) | ||
120 | #define SET_AE_XFER(handle, ae, reg, val) \ | ||
121 | ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) | ||
122 | #define SRAM_WRITE(handle, addr, val) \ | ||
123 | ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) | ||
124 | #define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr) | ||
125 | #endif | ||
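
A hypothetical read-modify-write through the CSR accessors above; ADF_CSR_RD/ADF_CSR_WR are assumed to be the driver's MMIO wrappers defined elsewhere, and qat_hal_clear_nn_mode_sketch is an illustrative name only:

	#include "icp_qat_hal.h"

	/* Hypothetical: clear next-neighbour mode in one AE's CTX_ENABLES
	 * local CSR via the GET/SET accessor pair. */
	static void qat_hal_clear_nn_mode_sketch(
			struct icp_qat_fw_loader_handle *handle,
			unsigned char ae)
	{
		unsigned int ctx_enables = GET_AE_CSR(handle, ae, CTX_ENABLES);

		ctx_enables &= ~CE_NN_MODE;
		SET_AE_CSR(handle, ae, CTX_ENABLES, ctx_enables);
	}
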
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h new file mode 100644 index 000000000000..5031f8c10d75 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h | |||
@@ -0,0 +1,305 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _ICP_QAT_HW_H_ | ||
48 | #define _ICP_QAT_HW_H_ | ||
49 | |||
50 | enum icp_qat_hw_ae_id { | ||
51 | ICP_QAT_HW_AE_0 = 0, | ||
52 | ICP_QAT_HW_AE_1 = 1, | ||
53 | ICP_QAT_HW_AE_2 = 2, | ||
54 | ICP_QAT_HW_AE_3 = 3, | ||
55 | ICP_QAT_HW_AE_4 = 4, | ||
56 | ICP_QAT_HW_AE_5 = 5, | ||
57 | ICP_QAT_HW_AE_6 = 6, | ||
58 | ICP_QAT_HW_AE_7 = 7, | ||
59 | ICP_QAT_HW_AE_8 = 8, | ||
60 | ICP_QAT_HW_AE_9 = 9, | ||
61 | ICP_QAT_HW_AE_10 = 10, | ||
62 | ICP_QAT_HW_AE_11 = 11, | ||
63 | ICP_QAT_HW_AE_DELIMITER = 12 | ||
64 | }; | ||
65 | |||
66 | enum icp_qat_hw_qat_id { | ||
67 | ICP_QAT_HW_QAT_0 = 0, | ||
68 | ICP_QAT_HW_QAT_1 = 1, | ||
69 | ICP_QAT_HW_QAT_2 = 2, | ||
70 | ICP_QAT_HW_QAT_3 = 3, | ||
71 | ICP_QAT_HW_QAT_4 = 4, | ||
72 | ICP_QAT_HW_QAT_5 = 5, | ||
73 | ICP_QAT_HW_QAT_DELIMITER = 6 | ||
74 | }; | ||
75 | |||
76 | enum icp_qat_hw_auth_algo { | ||
77 | ICP_QAT_HW_AUTH_ALGO_NULL = 0, | ||
78 | ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, | ||
79 | ICP_QAT_HW_AUTH_ALGO_MD5 = 2, | ||
80 | ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, | ||
81 | ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, | ||
82 | ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, | ||
83 | ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, | ||
84 | ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, | ||
85 | ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, | ||
86 | ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, | ||
87 | ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, | ||
88 | ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, | ||
89 | ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, | ||
90 | ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, | ||
91 | ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, | ||
92 | ICP_QAT_HW_AUTH_RESERVED_1 = 15, | ||
93 | ICP_QAT_HW_AUTH_RESERVED_2 = 16, | ||
94 | ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, | ||
95 | ICP_QAT_HW_AUTH_RESERVED_3 = 18, | ||
96 | ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, | ||
97 | ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 | ||
98 | }; | ||
99 | |||
100 | enum icp_qat_hw_auth_mode { | ||
101 | ICP_QAT_HW_AUTH_MODE0 = 0, | ||
102 | ICP_QAT_HW_AUTH_MODE1 = 1, | ||
103 | ICP_QAT_HW_AUTH_MODE2 = 2, | ||
104 | ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 | ||
105 | }; | ||
106 | |||
107 | struct icp_qat_hw_auth_config { | ||
108 | uint32_t config; | ||
109 | uint32_t reserved; | ||
110 | }; | ||
111 | |||
112 | #define QAT_AUTH_MODE_BITPOS 4 | ||
113 | #define QAT_AUTH_MODE_MASK 0xF | ||
114 | #define QAT_AUTH_ALGO_BITPOS 0 | ||
115 | #define QAT_AUTH_ALGO_MASK 0xF | ||
116 | #define QAT_AUTH_CMP_BITPOS 8 | ||
117 | #define QAT_AUTH_CMP_MASK 0x7F | ||
118 | #define QAT_AUTH_SHA3_PADDING_BITPOS 16 | ||
119 | #define QAT_AUTH_SHA3_PADDING_MASK 0x1 | ||
120 | #define QAT_AUTH_ALGO_SHA3_BITPOS 22 | ||
121 | #define QAT_AUTH_ALGO_SHA3_MASK 0x3 | ||
122 | #define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ | ||
123 | (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ | ||
124 | ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ | ||
125 | (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \ | ||
126 | QAT_AUTH_ALGO_SHA3_BITPOS) | \ | ||
127 | (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \ | ||
128 | (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \ | ||
129 | & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \ | ||
130 | ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) | ||
131 | |||
132 | struct icp_qat_hw_auth_counter { | ||
133 | __be32 counter; | ||
134 | uint32_t reserved; | ||
135 | }; | ||
136 | |||
137 | #define QAT_AUTH_COUNT_MASK 0xFFFFFFFF | ||
138 | #define QAT_AUTH_COUNT_BITPOS 0 | ||
139 | #define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ | ||
140 | (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) | ||
141 | |||
142 | struct icp_qat_hw_auth_setup { | ||
143 | struct icp_qat_hw_auth_config auth_config; | ||
144 | struct icp_qat_hw_auth_counter auth_counter; | ||
145 | }; | ||
146 | |||
147 | #define QAT_HW_DEFAULT_ALIGNMENT 8 | ||
148 | #define QAT_HW_ROUND_UP(val, n) (((val) + ((n)-1)) & (~((n)-1))) | ||
149 | #define ICP_QAT_HW_NULL_STATE1_SZ 32 | ||
150 | #define ICP_QAT_HW_MD5_STATE1_SZ 16 | ||
151 | #define ICP_QAT_HW_SHA1_STATE1_SZ 20 | ||
152 | #define ICP_QAT_HW_SHA224_STATE1_SZ 32 | ||
153 | #define ICP_QAT_HW_SHA256_STATE1_SZ 32 | ||
154 | #define ICP_QAT_HW_SHA3_256_STATE1_SZ 32 | ||
155 | #define ICP_QAT_HW_SHA384_STATE1_SZ 64 | ||
156 | #define ICP_QAT_HW_SHA512_STATE1_SZ 64 | ||
157 | #define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 | ||
158 | #define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 | ||
159 | #define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 | ||
160 | #define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 | ||
161 | #define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 | ||
162 | #define ICP_QAT_HW_AES_F9_STATE1_SZ 32 | ||
163 | #define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 | ||
164 | #define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 | ||
165 | #define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 | ||
166 | #define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 | ||
167 | #define ICP_QAT_HW_NULL_STATE2_SZ 32 | ||
168 | #define ICP_QAT_HW_MD5_STATE2_SZ 16 | ||
169 | #define ICP_QAT_HW_SHA1_STATE2_SZ 20 | ||
170 | #define ICP_QAT_HW_SHA224_STATE2_SZ 32 | ||
171 | #define ICP_QAT_HW_SHA256_STATE2_SZ 32 | ||
172 | #define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 | ||
173 | #define ICP_QAT_HW_SHA384_STATE2_SZ 64 | ||
174 | #define ICP_QAT_HW_SHA512_STATE2_SZ 64 | ||
175 | #define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 | ||
176 | #define ICP_QAT_HW_SHA3_224_STATE2_SZ 0 | ||
177 | #define ICP_QAT_HW_SHA3_384_STATE2_SZ 0 | ||
178 | #define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 | ||
179 | #define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 | ||
180 | #define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16 | ||
181 | #define ICP_QAT_HW_F9_IK_SZ 16 | ||
182 | #define ICP_QAT_HW_F9_FK_SZ 16 | ||
183 | #define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \ | ||
184 | ICP_QAT_HW_F9_FK_SZ) | ||
185 | #define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ | ||
186 | #define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24 | ||
187 | #define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32 | ||
188 | #define ICP_QAT_HW_GALOIS_H_SZ 16 | ||
189 | #define ICP_QAT_HW_GALOIS_LEN_A_SZ 8 | ||
190 | #define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16 | ||
191 | |||
192 | struct icp_qat_hw_auth_sha512 { | ||
193 | struct icp_qat_hw_auth_setup inner_setup; | ||
194 | uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; | ||
195 | struct icp_qat_hw_auth_setup outer_setup; | ||
196 | uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; | ||
197 | }; | ||
198 | |||
199 | struct icp_qat_hw_auth_algo_blk { | ||
200 | struct icp_qat_hw_auth_sha512 sha; | ||
201 | }; | ||
202 | |||
203 | #define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0 | ||
204 | #define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF | ||
205 | |||
206 | enum icp_qat_hw_cipher_algo { | ||
207 | ICP_QAT_HW_CIPHER_ALGO_NULL = 0, | ||
208 | ICP_QAT_HW_CIPHER_ALGO_DES = 1, | ||
209 | ICP_QAT_HW_CIPHER_ALGO_3DES = 2, | ||
210 | ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, | ||
211 | ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, | ||
212 | ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, | ||
213 | ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, | ||
214 | ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, | ||
215 | ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, | ||
216 | ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, | ||
217 | ICP_QAT_HW_CIPHER_DELIMITER = 10 | ||
218 | }; | ||
219 | |||
220 | enum icp_qat_hw_cipher_mode { | ||
221 | ICP_QAT_HW_CIPHER_ECB_MODE = 0, | ||
222 | ICP_QAT_HW_CIPHER_CBC_MODE = 1, | ||
223 | ICP_QAT_HW_CIPHER_CTR_MODE = 2, | ||
224 | ICP_QAT_HW_CIPHER_F8_MODE = 3, | ||
225 | ICP_QAT_HW_CIPHER_XTS_MODE = 6, | ||
226 | ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 | ||
227 | }; | ||
228 | |||
229 | struct icp_qat_hw_cipher_config { | ||
230 | uint32_t val; | ||
231 | uint32_t reserved; | ||
232 | }; | ||
233 | |||
234 | enum icp_qat_hw_cipher_dir { | ||
235 | ICP_QAT_HW_CIPHER_ENCRYPT = 0, | ||
236 | ICP_QAT_HW_CIPHER_DECRYPT = 1, | ||
237 | }; | ||
238 | |||
239 | enum icp_qat_hw_cipher_convert { | ||
240 | ICP_QAT_HW_CIPHER_NO_CONVERT = 0, | ||
241 | ICP_QAT_HW_CIPHER_KEY_CONVERT = 1, | ||
242 | }; | ||
243 | |||
244 | #define QAT_CIPHER_MODE_BITPOS 4 | ||
245 | #define QAT_CIPHER_MODE_MASK 0xF | ||
246 | #define QAT_CIPHER_ALGO_BITPOS 0 | ||
247 | #define QAT_CIPHER_ALGO_MASK 0xF | ||
248 | #define QAT_CIPHER_CONVERT_BITPOS 9 | ||
249 | #define QAT_CIPHER_CONVERT_MASK 0x1 | ||
250 | #define QAT_CIPHER_DIR_BITPOS 8 | ||
251 | #define QAT_CIPHER_DIR_MASK 0x1 | ||
252 | #define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2 | ||
253 | #define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2 | ||
254 | #define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ | ||
255 | (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \ | ||
256 | ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \ | ||
257 | ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \ | ||
258 | ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS)) | ||
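Editor's note: the cipher config word uses the same scheme — mode in bits [7:4], algorithm in [3:0], direction in bit 8, key-convert in bit 9. A sketch of the two values qat_algs.c later builds via its QAT_AES_HW_CONFIG_ENC/DEC wrappers for AES-128 in CBC mode (illustrative only, assuming this header is on the include path):

#include <assert.h>
#include "icp_qat_hw.h"	/* the header introduced above */

int main(void)
{
	unsigned int enc = ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_CBC_MODE,
				ICP_QAT_HW_CIPHER_ALGO_AES128,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				ICP_QAT_HW_CIPHER_ENCRYPT);
	unsigned int dec = ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_CBC_MODE,
				ICP_QAT_HW_CIPHER_ALGO_AES128,
				ICP_QAT_HW_CIPHER_KEY_CONVERT,
				ICP_QAT_HW_CIPHER_DECRYPT);

	assert(enc == 0x013);	/* CBC mode 1 << 4 | AES-128 algo 3 */
	assert(dec == 0x313);	/* + key-convert bit 9 and decrypt bit 8 */
	return 0;
}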
259 | #define ICP_QAT_HW_DES_BLK_SZ 8 | ||
260 | #define ICP_QAT_HW_3DES_BLK_SZ 8 | ||
261 | #define ICP_QAT_HW_NULL_BLK_SZ 8 | ||
262 | #define ICP_QAT_HW_AES_BLK_SZ 16 | ||
263 | #define ICP_QAT_HW_KASUMI_BLK_SZ 8 | ||
264 | #define ICP_QAT_HW_SNOW_3G_BLK_SZ 8 | ||
265 | #define ICP_QAT_HW_ZUC_3G_BLK_SZ 8 | ||
266 | #define ICP_QAT_HW_NULL_KEY_SZ 256 | ||
267 | #define ICP_QAT_HW_DES_KEY_SZ 8 | ||
268 | #define ICP_QAT_HW_3DES_KEY_SZ 24 | ||
269 | #define ICP_QAT_HW_AES_128_KEY_SZ 16 | ||
270 | #define ICP_QAT_HW_AES_192_KEY_SZ 24 | ||
271 | #define ICP_QAT_HW_AES_256_KEY_SZ 32 | ||
272 | #define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ | ||
273 | QAT_CIPHER_MODE_F8_KEY_SZ_MULT) | ||
274 | #define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \ | ||
275 | QAT_CIPHER_MODE_F8_KEY_SZ_MULT) | ||
276 | #define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ | ||
277 | QAT_CIPHER_MODE_F8_KEY_SZ_MULT) | ||
278 | #define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ | ||
279 | QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) | ||
280 | #define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ | ||
281 | QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) | ||
282 | #define ICP_QAT_HW_KASUMI_KEY_SZ 16 | ||
283 | #define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \ | ||
284 | QAT_CIPHER_MODE_F8_KEY_SZ_MULT) | ||
289 | #define ICP_QAT_HW_ARC4_KEY_SZ 256 | ||
290 | #define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16 | ||
291 | #define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16 | ||
292 | #define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16 | ||
293 | #define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16 | ||
294 | #define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2 | ||
295 | #define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024 | ||
296 | |||
297 | struct icp_qat_hw_cipher_aes256_f8 { | ||
298 | struct icp_qat_hw_cipher_config cipher_config; | ||
299 | uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; | ||
300 | }; | ||
301 | |||
302 | struct icp_qat_hw_cipher_algo_blk { | ||
303 | struct icp_qat_hw_cipher_aes256_f8 aes; | ||
304 | }; | ||
305 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h new file mode 100644 index 000000000000..2132a8cbc4ec --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h | |||
@@ -0,0 +1,377 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef __ICP_QAT_UCLO_H__ | ||
48 | #define __ICP_QAT_UCLO_H__ | ||
49 | |||
50 | #define ICP_QAT_AC_C_CPU_TYPE 0x00400000 | ||
51 | #define ICP_QAT_UCLO_MAX_AE 12 | ||
52 | #define ICP_QAT_UCLO_MAX_CTX 8 | ||
53 | #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) | ||
54 | #define ICP_QAT_UCLO_MAX_USTORE 0x4000 | ||
55 | #define ICP_QAT_UCLO_MAX_XFER_REG 128 | ||
56 | #define ICP_QAT_UCLO_MAX_GPR_REG 128 | ||
57 | #define ICP_QAT_UCLO_MAX_NN_REG 128 | ||
58 | #define ICP_QAT_UCLO_MAX_LMEM_REG 1024 | ||
59 | #define ICP_QAT_UCLO_AE_ALL_CTX 0xff | ||
60 | #define ICP_QAT_UOF_OBJID_LEN 8 | ||
61 | #define ICP_QAT_UOF_FID 0xc6c2 | ||
62 | #define ICP_QAT_UOF_MAJVER 0x4 | ||
63 | #define ICP_QAT_UOF_MINVER 0x11 | ||
64 | #define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff | ||
65 | #define ICP_QAT_UOF_OBJS "UOF_OBJS" | ||
66 | #define ICP_QAT_UOF_STRT "UOF_STRT" | ||
67 | #define ICP_QAT_UOF_GTID "UOF_GTID" | ||
68 | #define ICP_QAT_UOF_IMAG "UOF_IMAG" | ||
69 | #define ICP_QAT_UOF_IMEM "UOF_IMEM" | ||
70 | #define ICP_QAT_UOF_MSEG "UOF_MSEG" | ||
71 | #define ICP_QAT_UOF_LOCAL_SCOPE 1 | ||
72 | #define ICP_QAT_UOF_INIT_EXPR 0 | ||
73 | #define ICP_QAT_UOF_INIT_REG 1 | ||
74 | #define ICP_QAT_UOF_INIT_REG_CTX 2 | ||
75 | #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 | ||
76 | |||
77 | #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) | ||
78 | #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) | ||
79 | #define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1) | ||
80 | #define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1) | ||
81 | |||
82 | #define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1) | ||
83 | #define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1) | ||
84 | |||
85 | enum icp_qat_uof_mem_region { | ||
86 | ICP_QAT_UOF_SRAM_REGION = 0x0, | ||
87 | ICP_QAT_UOF_LMEM_REGION = 0x3, | ||
88 | ICP_QAT_UOF_UMEM_REGION = 0x5 | ||
89 | }; | ||
90 | |||
91 | enum icp_qat_uof_regtype { | ||
92 | ICP_NO_DEST, | ||
93 | ICP_GPA_REL, | ||
94 | ICP_GPA_ABS, | ||
95 | ICP_GPB_REL, | ||
96 | ICP_GPB_ABS, | ||
97 | ICP_SR_REL, | ||
98 | ICP_SR_RD_REL, | ||
99 | ICP_SR_WR_REL, | ||
100 | ICP_SR_ABS, | ||
101 | ICP_SR_RD_ABS, | ||
102 | ICP_SR_WR_ABS, | ||
103 | ICP_DR_REL, | ||
104 | ICP_DR_RD_REL, | ||
105 | ICP_DR_WR_REL, | ||
106 | ICP_DR_ABS, | ||
107 | ICP_DR_RD_ABS, | ||
108 | ICP_DR_WR_ABS, | ||
109 | ICP_LMEM, | ||
110 | ICP_LMEM0, | ||
111 | ICP_LMEM1, | ||
112 | ICP_NEIGH_REL, | ||
113 | }; | ||
114 | |||
115 | struct icp_qat_uclo_page { | ||
116 | struct icp_qat_uclo_encap_page *encap_page; | ||
117 | struct icp_qat_uclo_region *region; | ||
118 | unsigned int flags; | ||
119 | }; | ||
120 | |||
121 | struct icp_qat_uclo_region { | ||
122 | struct icp_qat_uclo_page *loaded; | ||
123 | struct icp_qat_uclo_page *page; | ||
124 | }; | ||
125 | |||
126 | struct icp_qat_uclo_aeslice { | ||
127 | struct icp_qat_uclo_region *region; | ||
128 | struct icp_qat_uclo_page *page; | ||
129 | struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX]; | ||
130 | struct icp_qat_uclo_encapme *encap_image; | ||
131 | unsigned int ctx_mask_assigned; | ||
132 | unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX]; | ||
133 | }; | ||
134 | |||
135 | struct icp_qat_uclo_aedata { | ||
136 | unsigned int slice_num; | ||
137 | unsigned int eff_ustore_size; | ||
138 | struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX]; | ||
139 | }; | ||
140 | |||
141 | struct icp_qat_uof_encap_obj { | ||
142 | char *beg_uof; | ||
143 | struct icp_qat_uof_objhdr *obj_hdr; | ||
144 | struct icp_qat_uof_chunkhdr *chunk_hdr; | ||
145 | struct icp_qat_uof_varmem_seg *var_mem_seg; | ||
146 | }; | ||
147 | |||
148 | struct icp_qat_uclo_encap_uwblock { | ||
149 | unsigned int start_addr; | ||
150 | unsigned int words_num; | ||
151 | uint64_t micro_words; | ||
152 | }; | ||
153 | |||
154 | struct icp_qat_uclo_encap_page { | ||
155 | unsigned int def_page; | ||
156 | unsigned int page_region; | ||
157 | unsigned int beg_addr_v; | ||
158 | unsigned int beg_addr_p; | ||
159 | unsigned int micro_words_num; | ||
160 | unsigned int uwblock_num; | ||
161 | struct icp_qat_uclo_encap_uwblock *uwblock; | ||
162 | }; | ||
163 | |||
164 | struct icp_qat_uclo_encapme { | ||
165 | struct icp_qat_uof_image *img_ptr; | ||
166 | struct icp_qat_uclo_encap_page *page; | ||
167 | unsigned int ae_reg_num; | ||
168 | struct icp_qat_uof_ae_reg *ae_reg; | ||
169 | unsigned int init_regsym_num; | ||
170 | struct icp_qat_uof_init_regsym *init_regsym; | ||
171 | unsigned int sbreak_num; | ||
172 | struct icp_qat_uof_sbreak *sbreak; | ||
173 | unsigned int uwords_num; | ||
174 | }; | ||
175 | |||
176 | struct icp_qat_uclo_init_mem_table { | ||
177 | unsigned int entry_num; | ||
178 | struct icp_qat_uof_initmem *init_mem; | ||
179 | }; | ||
180 | |||
181 | struct icp_qat_uclo_objhdr { | ||
182 | char *file_buff; | ||
183 | unsigned int checksum; | ||
184 | unsigned int size; | ||
185 | }; | ||
186 | |||
187 | struct icp_qat_uof_strtable { | ||
188 | unsigned int table_len; | ||
189 | unsigned int reserved; | ||
190 | uint64_t strings; | ||
191 | }; | ||
192 | |||
193 | struct icp_qat_uclo_objhandle { | ||
194 | unsigned int prod_type; | ||
195 | unsigned int prod_rev; | ||
196 | struct icp_qat_uclo_objhdr *obj_hdr; | ||
197 | struct icp_qat_uof_encap_obj encap_uof_obj; | ||
198 | struct icp_qat_uof_strtable str_table; | ||
199 | struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE]; | ||
200 | struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE]; | ||
201 | struct icp_qat_uclo_init_mem_table init_mem_tab; | ||
202 | struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE]; | ||
203 | struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE]; | ||
204 | int uimage_num; | ||
205 | int uword_in_bytes; | ||
206 | int global_inited; | ||
207 | unsigned int ae_num; | ||
208 | unsigned int ustore_phy_size; | ||
209 | void *obj_buf; | ||
210 | uint64_t *uword_buf; | ||
211 | }; | ||
212 | |||
213 | struct icp_qat_uof_uword_block { | ||
214 | unsigned int start_addr; | ||
215 | unsigned int words_num; | ||
216 | unsigned int uword_offset; | ||
217 | unsigned int reserved; | ||
218 | }; | ||
219 | |||
220 | struct icp_qat_uof_filehdr { | ||
221 | unsigned short file_id; | ||
222 | unsigned short reserved1; | ||
223 | char min_ver; | ||
224 | char maj_ver; | ||
225 | unsigned short reserved2; | ||
226 | unsigned short max_chunks; | ||
227 | unsigned short num_chunks; | ||
228 | }; | ||
229 | |||
230 | struct icp_qat_uof_filechunkhdr { | ||
231 | char chunk_id[ICP_QAT_UOF_OBJID_LEN]; | ||
232 | unsigned int checksum; | ||
233 | unsigned int offset; | ||
234 | unsigned int size; | ||
235 | }; | ||
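Editor's note: together these two structures describe the start of a UOF firmware image — a file header whose file_id must match the ICP_QAT_UOF_FID magic, followed by num_chunks chunk directory entries locating each named payload by offset and size. A hedged parse sketch (illustrative only; assumes the chunk directory immediately follows the file header in a fully read-in buffer, and elides bounds and checksum validation):

#include <stdio.h>
#include "icp_qat_uclo.h"	/* the header introduced above */

static void list_uof_chunks(const char *buf)
{
	const struct icp_qat_uof_filehdr *fh = (const void *)buf;
	const struct icp_qat_uof_filechunkhdr *ch =
		(const void *)(buf + sizeof(*fh));
	int i;

	if (fh->file_id != ICP_QAT_UOF_FID)	/* 0xc6c2 magic */
		return;
	for (i = 0; i < fh->num_chunks; i++, ch++)
		/* chunk_id is 8 bytes, not NUL terminated */
		printf("%.8s: offset %u, %u bytes\n",
		       ch->chunk_id, ch->offset, ch->size);
}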
236 | |||
237 | struct icp_qat_uof_objhdr { | ||
238 | unsigned int cpu_type; | ||
239 | unsigned short min_cpu_ver; | ||
240 | unsigned short max_cpu_ver; | ||
241 | short max_chunks; | ||
242 | short num_chunks; | ||
243 | unsigned int reserved1; | ||
244 | unsigned int reserved2; | ||
245 | }; | ||
246 | |||
247 | struct icp_qat_uof_chunkhdr { | ||
248 | char chunk_id[ICP_QAT_UOF_OBJID_LEN]; | ||
249 | unsigned int offset; | ||
250 | unsigned int size; | ||
251 | }; | ||
252 | |||
253 | struct icp_qat_uof_memvar_attr { | ||
254 | unsigned int offset_in_byte; | ||
255 | unsigned int value; | ||
256 | }; | ||
257 | |||
258 | struct icp_qat_uof_initmem { | ||
259 | unsigned int sym_name; | ||
260 | char region; | ||
261 | char scope; | ||
262 | unsigned short reserved1; | ||
263 | unsigned int addr; | ||
264 | unsigned int num_in_bytes; | ||
265 | unsigned int val_attr_num; | ||
266 | }; | ||
267 | |||
268 | struct icp_qat_uof_init_regsym { | ||
269 | unsigned int sym_name; | ||
270 | char init_type; | ||
271 | char value_type; | ||
272 | char reg_type; | ||
273 | unsigned char ctx; | ||
274 | unsigned int reg_addr; | ||
275 | unsigned int value; | ||
276 | }; | ||
277 | |||
278 | struct icp_qat_uof_varmem_seg { | ||
279 | unsigned int sram_base; | ||
280 | unsigned int sram_size; | ||
281 | unsigned int sram_alignment; | ||
282 | unsigned int sdram_base; | ||
283 | unsigned int sdram_size; | ||
284 | unsigned int sdram_alignment; | ||
285 | unsigned int sdram1_base; | ||
286 | unsigned int sdram1_size; | ||
287 | unsigned int sdram1_alignment; | ||
288 | unsigned int scratch_base; | ||
289 | unsigned int scratch_size; | ||
290 | unsigned int scratch_alignment; | ||
291 | }; | ||
292 | |||
293 | struct icp_qat_uof_gtid { | ||
294 | char tool_id[ICP_QAT_UOF_OBJID_LEN]; | ||
295 | int tool_ver; | ||
296 | unsigned int reserved1; | ||
297 | unsigned int reserved2; | ||
298 | }; | ||
299 | |||
300 | struct icp_qat_uof_sbreak { | ||
301 | unsigned int page_num; | ||
302 | unsigned int virt_uaddr; | ||
303 | unsigned char sbreak_type; | ||
304 | unsigned char reg_type; | ||
305 | unsigned short reserved1; | ||
306 | unsigned int addr_offset; | ||
307 | unsigned int reg_addr; | ||
308 | }; | ||
309 | |||
310 | struct icp_qat_uof_code_page { | ||
311 | unsigned int page_region; | ||
312 | unsigned int page_num; | ||
313 | unsigned char def_page; | ||
314 | unsigned char reserved2; | ||
315 | unsigned short reserved1; | ||
316 | unsigned int beg_addr_v; | ||
317 | unsigned int beg_addr_p; | ||
318 | unsigned int neigh_reg_tab_offset; | ||
319 | unsigned int uc_var_tab_offset; | ||
320 | unsigned int imp_var_tab_offset; | ||
321 | unsigned int imp_expr_tab_offset; | ||
322 | unsigned int code_area_offset; | ||
323 | }; | ||
324 | |||
325 | struct icp_qat_uof_image { | ||
326 | unsigned int img_name; | ||
327 | unsigned int ae_assigned; | ||
328 | unsigned int ctx_assigned; | ||
329 | unsigned int cpu_type; | ||
330 | unsigned int entry_address; | ||
331 | unsigned int fill_pattern[2]; | ||
332 | unsigned int reloadable_size; | ||
333 | unsigned char sensitivity; | ||
334 | unsigned char reserved; | ||
335 | unsigned short ae_mode; | ||
336 | unsigned short max_ver; | ||
337 | unsigned short min_ver; | ||
338 | unsigned short image_attrib; | ||
339 | unsigned short reserved2; | ||
340 | unsigned short page_region_num; | ||
341 | unsigned short numpages; | ||
342 | unsigned int reg_tab_offset; | ||
343 | unsigned int init_reg_sym_tab; | ||
344 | unsigned int sbreak_tab; | ||
345 | unsigned int app_metadata; | ||
346 | }; | ||
347 | |||
348 | struct icp_qat_uof_objtable { | ||
349 | unsigned int entry_num; | ||
350 | }; | ||
351 | |||
352 | struct icp_qat_uof_ae_reg { | ||
353 | unsigned int name; | ||
354 | unsigned int vis_name; | ||
355 | unsigned short type; | ||
356 | unsigned short addr; | ||
357 | unsigned short access_mode; | ||
358 | unsigned char visible; | ||
359 | unsigned char reserved1; | ||
360 | unsigned short ref_count; | ||
361 | unsigned short reserved2; | ||
362 | unsigned int xo_id; | ||
363 | }; | ||
364 | |||
365 | struct icp_qat_uof_code_area { | ||
366 | unsigned int micro_words_num; | ||
367 | unsigned int uword_block_tab; | ||
368 | }; | ||
369 | |||
370 | struct icp_qat_uof_batch_init { | ||
371 | unsigned int ae; | ||
372 | unsigned int addr; | ||
373 | unsigned int *value; | ||
374 | unsigned int size; | ||
375 | struct icp_qat_uof_batch_init *next; | ||
376 | }; | ||
377 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c new file mode 100644 index 000000000000..59df48872955 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
@@ -0,0 +1,1038 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/crypto.h> | ||
50 | #include <crypto/aead.h> | ||
51 | #include <crypto/aes.h> | ||
52 | #include <crypto/sha.h> | ||
53 | #include <crypto/hash.h> | ||
54 | #include <crypto/algapi.h> | ||
55 | #include <crypto/authenc.h> | ||
56 | #include <crypto/rng.h> | ||
57 | #include <linux/dma-mapping.h> | ||
58 | #include "adf_accel_devices.h" | ||
59 | #include "adf_transport.h" | ||
60 | #include "adf_common_drv.h" | ||
61 | #include "qat_crypto.h" | ||
62 | #include "icp_qat_hw.h" | ||
63 | #include "icp_qat_fw.h" | ||
64 | #include "icp_qat_fw_la.h" | ||
65 | |||
66 | #define QAT_AES_HW_CONFIG_ENC(alg) \ | ||
67 | ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ | ||
68 | ICP_QAT_HW_CIPHER_NO_CONVERT, \ | ||
69 | ICP_QAT_HW_CIPHER_ENCRYPT) | ||
70 | |||
71 | #define QAT_AES_HW_CONFIG_DEC(alg) \ | ||
72 | ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ | ||
73 | ICP_QAT_HW_CIPHER_KEY_CONVERT, \ | ||
74 | ICP_QAT_HW_CIPHER_DECRYPT) | ||
75 | |||
76 | static atomic_t active_dev; | ||
77 | |||
78 | struct qat_alg_buf { | ||
79 | uint32_t len; | ||
80 | uint32_t resrvd; | ||
81 | uint64_t addr; | ||
82 | } __packed; | ||
83 | |||
84 | struct qat_alg_buf_list { | ||
85 | uint64_t resrvd; | ||
86 | uint32_t num_bufs; | ||
87 | uint32_t num_mapped_bufs; | ||
88 | struct qat_alg_buf bufers[]; | ||
89 | } __packed __aligned(64); | ||
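Editor's note: qat_alg_buf_list is the flexible-array scatter list handed to the firmware, hence __packed (exact wire layout) and __aligned(64) (cache-line alignment for DMA). A minimal user-space sketch of the sizing idiom the driver applies in qat_alg_sgl_to_bufl() below (the helper name is hypothetical, not part of the patch):

#include <stdlib.h>

/* Assumes the struct definitions above are in scope. */
static struct qat_alg_buf_list *alloc_bufl(unsigned int nbufs)
{
	size_t sz = sizeof(struct qat_alg_buf_list) +
		    nbufs * sizeof(struct qat_alg_buf);
	struct qat_alg_buf_list *bl = calloc(1, sz);

	if (bl)
		bl->num_bufs = nbufs;
	return bl;	/* caller fills bl->bufers[i].addr/len, then DMA maps */
}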
90 | |||
91 | /* Common content descriptor */ | ||
92 | struct qat_alg_cd { | ||
93 | union { | ||
94 | struct qat_enc { /* Encrypt content desc */ | ||
95 | struct icp_qat_hw_cipher_algo_blk cipher; | ||
96 | struct icp_qat_hw_auth_algo_blk hash; | ||
97 | } qat_enc_cd; | ||
98 | struct qat_dec { /* Decrypt content desc */ | ||
99 | struct icp_qat_hw_auth_algo_blk hash; | ||
100 | struct icp_qat_hw_cipher_algo_blk cipher; | ||
101 | } qat_dec_cd; | ||
102 | }; | ||
103 | } __aligned(64); | ||
104 | |||
105 | #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk) | ||
106 | |||
107 | struct qat_auth_state { | ||
108 | uint8_t data[MAX_AUTH_STATE_SIZE]; | ||
109 | } __aligned(64); | ||
110 | |||
111 | struct qat_alg_session_ctx { | ||
112 | struct qat_alg_cd *enc_cd; | ||
113 | dma_addr_t enc_cd_paddr; | ||
114 | struct qat_alg_cd *dec_cd; | ||
115 | dma_addr_t dec_cd_paddr; | ||
116 | struct qat_auth_state *auth_hw_state_enc; | ||
117 | dma_addr_t auth_state_enc_paddr; | ||
118 | struct qat_auth_state *auth_hw_state_dec; | ||
119 | dma_addr_t auth_state_dec_paddr; | ||
120 | struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl; | ||
121 | struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl; | ||
122 | struct qat_crypto_instance *inst; | ||
123 | struct crypto_tfm *tfm; | ||
124 | struct crypto_shash *hash_tfm; | ||
125 | enum icp_qat_hw_auth_algo qat_hash_alg; | ||
126 | uint8_t salt[AES_BLOCK_SIZE]; | ||
127 | spinlock_t lock; /* protects qat_alg_session_ctx struct */ | ||
128 | }; | ||
129 | |||
130 | static int get_current_node(void) | ||
131 | { | ||
132 | return cpu_data(current_thread_info()->cpu).phys_proc_id; | ||
133 | } | ||
134 | |||
135 | static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) | ||
136 | { | ||
137 | switch (qat_hash_alg) { | ||
138 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
139 | return ICP_QAT_HW_SHA1_STATE1_SZ; | ||
140 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
141 | return ICP_QAT_HW_SHA256_STATE1_SZ; | ||
142 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
143 | return ICP_QAT_HW_SHA512_STATE1_SZ; | ||
144 | default: | ||
145 | return -EFAULT; | ||
146 | } | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | |||
150 | static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, | ||
151 | struct qat_alg_session_ctx *ctx, | ||
152 | const uint8_t *auth_key, | ||
153 | unsigned int auth_keylen, uint8_t *auth_state) | ||
154 | { | ||
155 | struct { | ||
156 | struct shash_desc shash; | ||
157 | char ctx[crypto_shash_descsize(ctx->hash_tfm)]; | ||
158 | } desc; | ||
159 | struct sha1_state sha1; | ||
160 | struct sha256_state sha256; | ||
161 | struct sha512_state sha512; | ||
162 | int block_size = crypto_shash_blocksize(ctx->hash_tfm); | ||
163 | int digest_size = crypto_shash_digestsize(ctx->hash_tfm); | ||
164 | uint8_t *ipad = auth_state; | ||
165 | uint8_t *opad = ipad + block_size; | ||
166 | __be32 *hash_state_out; | ||
167 | __be64 *hash512_state_out; | ||
168 | int i, offset; | ||
169 | |||
170 | desc.shash.tfm = ctx->hash_tfm; | ||
171 | desc.shash.flags = 0x0; | ||
172 | |||
173 | if (auth_keylen > block_size) { | ||
174 | char buff[SHA512_BLOCK_SIZE]; | ||
175 | int ret = crypto_shash_digest(&desc.shash, auth_key, | ||
176 | auth_keylen, buff); | ||
177 | if (ret) | ||
178 | return ret; | ||
179 | |||
180 | memcpy(ipad, buff, digest_size); | ||
181 | memcpy(opad, buff, digest_size); | ||
182 | memset(ipad + digest_size, 0, block_size - digest_size); | ||
183 | memset(opad + digest_size, 0, block_size - digest_size); | ||
184 | } else { | ||
185 | memcpy(ipad, auth_key, auth_keylen); | ||
186 | memcpy(opad, auth_key, auth_keylen); | ||
187 | memset(ipad + auth_keylen, 0, block_size - auth_keylen); | ||
188 | memset(opad + auth_keylen, 0, block_size - auth_keylen); | ||
189 | } | ||
190 | |||
191 | for (i = 0; i < block_size; i++) { | ||
192 | char *ipad_ptr = ipad + i; | ||
193 | char *opad_ptr = opad + i; | ||
194 | *ipad_ptr ^= 0x36; | ||
195 | *opad_ptr ^= 0x5C; | ||
196 | } | ||
197 | |||
198 | if (crypto_shash_init(&desc.shash)) | ||
199 | return -EFAULT; | ||
200 | |||
201 | if (crypto_shash_update(&desc.shash, ipad, block_size)) | ||
202 | return -EFAULT; | ||
203 | |||
204 | hash_state_out = (__be32 *)hash->sha.state1; | ||
205 | hash512_state_out = (__be64 *)hash_state_out; | ||
206 | |||
207 | switch (ctx->qat_hash_alg) { | ||
208 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
209 | if (crypto_shash_export(&desc.shash, &sha1)) | ||
210 | return -EFAULT; | ||
211 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) | ||
212 | *hash_state_out = cpu_to_be32(*(sha1.state + i)); | ||
213 | break; | ||
214 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
215 | if (crypto_shash_export(&desc.shash, &sha256)) | ||
216 | return -EFAULT; | ||
217 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) | ||
218 | *hash_state_out = cpu_to_be32(*(sha256.state + i)); | ||
219 | break; | ||
220 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
221 | if (crypto_shash_export(&desc.shash, &sha512)) | ||
222 | return -EFAULT; | ||
223 | for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) | ||
224 | *hash512_state_out = cpu_to_be64(*(sha512.state + i)); | ||
225 | break; | ||
226 | default: | ||
227 | return -EFAULT; | ||
228 | } | ||
229 | |||
230 | if (crypto_shash_init(&desc.shash)) | ||
231 | return -EFAULT; | ||
232 | |||
233 | if (crypto_shash_update(&desc.shash, opad, block_size)) | ||
234 | return -EFAULT; | ||
235 | |||
236 | offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); | ||
237 | hash_state_out = (__be32 *)(hash->sha.state1 + offset); | ||
238 | hash512_state_out = (__be64 *)hash_state_out; | ||
239 | |||
240 | switch (ctx->qat_hash_alg) { | ||
241 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
242 | if (crypto_shash_export(&desc.shash, &sha1)) | ||
243 | return -EFAULT; | ||
244 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) | ||
245 | *hash_state_out = cpu_to_be32(*(sha1.state + i)); | ||
246 | break; | ||
247 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
248 | if (crypto_shash_export(&desc.shash, &sha256)) | ||
249 | return -EFAULT; | ||
250 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) | ||
251 | *hash_state_out = cpu_to_be32(*(sha256.state + i)); | ||
252 | break; | ||
253 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
254 | if (crypto_shash_export(&desc.shash, &sha512)) | ||
255 | return -EFAULT; | ||
256 | for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) | ||
257 | *hash512_state_out = cpu_to_be64(*(sha512.state + i)); | ||
258 | break; | ||
259 | default: | ||
260 | return -EFAULT; | ||
261 | } | ||
262 | return 0; | ||
263 | } | ||
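Editor's note: the precompute above is the standard RFC 2104 HMAC key schedule — derive ipad/opad from the key, run one hash block over each, and export the midstates into state1/state2 so the hardware can resume the inner and outer hashes per request. The pad derivation alone, as a minimal user-space sketch (assumes the key has already been shortened to at most one block, as the code above does for longer keys):

#include <stdint.h>
#include <string.h>

#define HMAC_BLOCK_SIZE 64	/* SHA-1/SHA-256 block size */

static void hmac_precompute_pads(const uint8_t *key, size_t keylen,
				 uint8_t ipad[HMAC_BLOCK_SIZE],
				 uint8_t opad[HMAC_BLOCK_SIZE])
{
	size_t i;

	/* Zero-pad the key to a full block, then XOR in the constants. */
	memset(ipad, 0, HMAC_BLOCK_SIZE);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, HMAC_BLOCK_SIZE);

	for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}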
264 | |||
265 | static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) | ||
266 | { | ||
267 | header->hdr_flags = | ||
268 | ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); | ||
269 | header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA; | ||
270 | header->comn_req_flags = | ||
271 | ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, | ||
272 | QAT_COMN_PTR_TYPE_SGL); | ||
273 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, | ||
274 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); | ||
275 | ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, | ||
276 | ICP_QAT_FW_LA_PARTIAL_NONE); | ||
277 | ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, | ||
278 | ICP_QAT_FW_CIPH_IV_16BYTE_DATA); | ||
279 | ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, | ||
280 | ICP_QAT_FW_LA_NO_PROTO); | ||
281 | ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, | ||
282 | ICP_QAT_FW_LA_NO_UPDATE_STATE); | ||
283 | } | ||
284 | |||
285 | static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, | ||
286 | int alg, struct crypto_authenc_keys *keys) | ||
287 | { | ||
288 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); | ||
289 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; | ||
290 | struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd; | ||
291 | struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher; | ||
292 | struct icp_qat_hw_auth_algo_blk *hash = | ||
293 | (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx + | ||
294 | sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen); | ||
295 | struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl; | ||
296 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; | ||
297 | struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; | ||
298 | void *ptr = &req_tmpl->cd_ctrl; | ||
299 | struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; | ||
300 | struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; | ||
301 | struct icp_qat_fw_la_auth_req_params *auth_param = | ||
302 | (struct icp_qat_fw_la_auth_req_params *) | ||
303 | ((char *)&req_tmpl->serv_specif_rqpars + | ||
304 | sizeof(struct icp_qat_fw_la_cipher_req_params)); | ||
305 | |||
306 | /* CD setup */ | ||
307 | cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg); | ||
308 | memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); | ||
309 | hash->sha.inner_setup.auth_config.config = | ||
310 | ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, | ||
311 | ctx->qat_hash_alg, digestsize); | ||
312 | hash->sha.inner_setup.auth_counter.counter = | ||
313 | cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); | ||
314 | |||
315 | if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen, | ||
316 | (uint8_t *)ctx->auth_hw_state_enc)) | ||
317 | return -EFAULT; | ||
318 | |||
319 | /* Request setup */ | ||
320 | qat_alg_init_common_hdr(header); | ||
321 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; | ||
322 | ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, | ||
323 | ICP_QAT_FW_LA_RET_AUTH_RES); | ||
324 | ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, | ||
325 | ICP_QAT_FW_LA_NO_CMP_AUTH_RES); | ||
326 | cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; | ||
327 | cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3; | ||
328 | |||
329 | /* Cipher CD config setup */ | ||
330 | cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3; | ||
331 | cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; | ||
332 | cipher_cd_ctrl->cipher_cfg_offset = 0; | ||
333 | ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); | ||
334 | ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); | ||
335 | /* Auth CD config setup */ | ||
336 | hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3; | ||
337 | hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; | ||
338 | hash_cd_ctrl->inner_res_sz = digestsize; | ||
339 | hash_cd_ctrl->final_sz = digestsize; | ||
340 | |||
341 | switch (ctx->qat_hash_alg) { | ||
342 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
343 | hash_cd_ctrl->inner_state1_sz = | ||
344 | round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8); | ||
345 | hash_cd_ctrl->inner_state2_sz = | ||
346 | round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8); | ||
347 | break; | ||
348 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
349 | hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ; | ||
350 | hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ; | ||
351 | break; | ||
352 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
353 | hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ; | ||
354 | hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ; | ||
355 | break; | ||
356 | default: | ||
357 | break; | ||
358 | } | ||
359 | hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + | ||
360 | ((sizeof(struct icp_qat_hw_auth_setup) + | ||
361 | round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); | ||
362 | auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr + | ||
363 | sizeof(struct icp_qat_hw_auth_counter) + | ||
364 | round_up(hash_cd_ctrl->inner_state1_sz, 8); | ||
365 | ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); | ||
366 | ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, | ||
371 | int alg, struct crypto_authenc_keys *keys) | ||
372 | { | ||
373 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); | ||
374 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; | ||
375 | struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd; | ||
376 | struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash; | ||
377 | struct icp_qat_hw_cipher_algo_blk *cipher = | ||
378 | (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx + | ||
379 | sizeof(struct icp_qat_hw_auth_setup) + | ||
380 | roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2); | ||
381 | struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl; | ||
382 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; | ||
383 | struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; | ||
384 | void *ptr = &req_tmpl->cd_ctrl; | ||
385 | struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; | ||
386 | struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; | ||
387 | struct icp_qat_fw_la_auth_req_params *auth_param = | ||
388 | (struct icp_qat_fw_la_auth_req_params *) | ||
389 | ((char *)&req_tmpl->serv_specif_rqpars + | ||
390 | sizeof(struct icp_qat_fw_la_cipher_req_params)); | ||
391 | |||
392 | /* CD setup */ | ||
393 | cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg); | ||
394 | memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); | ||
395 | hash->sha.inner_setup.auth_config.config = | ||
396 | ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, | ||
397 | ctx->qat_hash_alg, | ||
398 | digestsize); | ||
399 | hash->sha.inner_setup.auth_counter.counter = | ||
400 | cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); | ||
401 | |||
402 | if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen, | ||
403 | (uint8_t *)ctx->auth_hw_state_dec)) | ||
404 | return -EFAULT; | ||
405 | |||
406 | /* Request setup */ | ||
407 | qat_alg_init_common_hdr(header); | ||
408 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; | ||
409 | ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, | ||
410 | ICP_QAT_FW_LA_NO_RET_AUTH_RES); | ||
411 | ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, | ||
412 | ICP_QAT_FW_LA_CMP_AUTH_RES); | ||
413 | cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; | ||
414 | cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3; | ||
415 | |||
416 | /* Cipher CD config setup */ | ||
417 | cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3; | ||
418 | cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; | ||
419 | cipher_cd_ctrl->cipher_cfg_offset = | ||
420 | (sizeof(struct icp_qat_hw_auth_setup) + | ||
421 | roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3; | ||
422 | ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); | ||
423 | ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); | ||
424 | |||
425 | /* Auth CD config setup */ | ||
426 | hash_cd_ctrl->hash_cfg_offset = 0; | ||
427 | hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; | ||
428 | hash_cd_ctrl->inner_res_sz = digestsize; | ||
429 | hash_cd_ctrl->final_sz = digestsize; | ||
430 | |||
431 | switch (ctx->qat_hash_alg) { | ||
432 | case ICP_QAT_HW_AUTH_ALGO_SHA1: | ||
433 | hash_cd_ctrl->inner_state1_sz = | ||
434 | round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8); | ||
435 | hash_cd_ctrl->inner_state2_sz = | ||
436 | round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8); | ||
437 | break; | ||
438 | case ICP_QAT_HW_AUTH_ALGO_SHA256: | ||
439 | hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ; | ||
440 | hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ; | ||
441 | break; | ||
442 | case ICP_QAT_HW_AUTH_ALGO_SHA512: | ||
443 | hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ; | ||
444 | hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ; | ||
445 | break; | ||
446 | default: | ||
447 | break; | ||
448 | } | ||
449 | |||
450 | hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + | ||
451 | ((sizeof(struct icp_qat_hw_auth_setup) + | ||
452 | round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); | ||
453 | auth_param->u1.auth_partial_st_prefix = ctx->auth_state_dec_paddr + | ||
454 | sizeof(struct icp_qat_hw_auth_counter) + | ||
455 | round_up(hash_cd_ctrl->inner_state1_sz, 8); | ||
456 | auth_param->auth_res_sz = digestsize; | ||
457 | ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); | ||
458 | ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); | ||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx, | ||
463 | const uint8_t *key, unsigned int keylen) | ||
464 | { | ||
465 | struct crypto_authenc_keys keys; | ||
466 | int alg; | ||
467 | |||
468 | if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) | ||
469 | return -EFAULT; | ||
470 | |||
471 | if (crypto_authenc_extractkeys(&keys, key, keylen)) | ||
472 | goto bad_key; | ||
473 | |||
474 | switch (keys.enckeylen) { | ||
475 | case AES_KEYSIZE_128: | ||
476 | alg = ICP_QAT_HW_CIPHER_ALGO_AES128; | ||
477 | break; | ||
478 | case AES_KEYSIZE_192: | ||
479 | alg = ICP_QAT_HW_CIPHER_ALGO_AES192; | ||
480 | break; | ||
481 | case AES_KEYSIZE_256: | ||
482 | alg = ICP_QAT_HW_CIPHER_ALGO_AES256; | ||
483 | break; | ||
484 | default: | ||
485 | goto bad_key; | ||
487 | } | ||
488 | |||
489 | if (qat_alg_init_enc_session(ctx, alg, &keys)) | ||
490 | goto error; | ||
491 | |||
492 | if (qat_alg_init_dec_session(ctx, alg, &keys)) | ||
493 | goto error; | ||
494 | |||
495 | return 0; | ||
496 | bad_key: | ||
497 | crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
498 | return -EINVAL; | ||
499 | error: | ||
500 | return -EFAULT; | ||
501 | } | ||
502 | |||
503 | static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key, | ||
504 | unsigned int keylen) | ||
505 | { | ||
506 | struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm); | ||
507 | struct device *dev; | ||
508 | |||
509 | spin_lock(&ctx->lock); | ||
510 | if (ctx->enc_cd) { | ||
511 | /* rekeying */ | ||
512 | dev = &GET_DEV(ctx->inst->accel_dev); | ||
513 | memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); | ||
514 | memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); | ||
515 | memset(ctx->auth_hw_state_enc, 0, | ||
516 | sizeof(struct qat_auth_state)); | ||
517 | memset(ctx->auth_hw_state_dec, 0, | ||
518 | sizeof(struct qat_auth_state)); | ||
519 | memset(&ctx->enc_fw_req_tmpl, 0, | ||
520 | sizeof(struct icp_qat_fw_la_bulk_req)); | ||
521 | memset(&ctx->dec_fw_req_tmpl, 0, | ||
522 | sizeof(struct icp_qat_fw_la_bulk_req)); | ||
523 | } else { | ||
524 | /* new key */ | ||
525 | int node = get_current_node(); | ||
526 | struct qat_crypto_instance *inst = | ||
527 | qat_crypto_get_instance_node(node); | ||
528 | if (!inst) { | ||
529 | spin_unlock(&ctx->lock); | ||
530 | return -EINVAL; | ||
531 | } | ||
532 | |||
533 | dev = &GET_DEV(inst->accel_dev); | ||
534 | ctx->inst = inst; | ||
535 | ctx->enc_cd = dma_zalloc_coherent(dev, | ||
536 | sizeof(struct qat_alg_cd), | ||
537 | &ctx->enc_cd_paddr, | ||
538 | GFP_ATOMIC); | ||
539 | if (!ctx->enc_cd) { | ||
540 | spin_unlock(&ctx->lock); | ||
541 | return -ENOMEM; | ||
542 | } | ||
543 | ctx->dec_cd = dma_zalloc_coherent(dev, | ||
544 | sizeof(struct qat_alg_cd), | ||
545 | &ctx->dec_cd_paddr, | ||
546 | GFP_ATOMIC); | ||
547 | if (!ctx->dec_cd) { | ||
548 | spin_unlock(&ctx->lock); | ||
549 | goto out_free_enc; | ||
550 | } | ||
551 | ctx->auth_hw_state_enc = | ||
552 | dma_zalloc_coherent(dev, sizeof(struct qat_auth_state), | ||
553 | &ctx->auth_state_enc_paddr, | ||
554 | GFP_ATOMIC); | ||
555 | if (!ctx->auth_hw_state_enc) { | ||
556 | spin_unlock(&ctx->lock); | ||
557 | goto out_free_dec; | ||
558 | } | ||
559 | ctx->auth_hw_state_dec = | ||
560 | dma_zalloc_coherent(dev, sizeof(struct qat_auth_state), | ||
561 | &ctx->auth_state_dec_paddr, | ||
562 | GFP_ATOMIC); | ||
563 | if (!ctx->auth_hw_state_dec) { | ||
564 | spin_unlock(&ctx->lock); | ||
565 | goto out_free_auth_enc; | ||
566 | } | ||
567 | } | ||
568 | spin_unlock(&ctx->lock); | ||
569 | if (qat_alg_init_sessions(ctx, key, keylen)) | ||
570 | goto out_free_all; | ||
571 | |||
572 | return 0; | ||
573 | |||
574 | out_free_all: | ||
575 | dma_free_coherent(dev, sizeof(struct qat_auth_state), | ||
576 | ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr); | ||
577 | ctx->auth_hw_state_dec = NULL; | ||
578 | out_free_auth_enc: | ||
579 | dma_free_coherent(dev, sizeof(struct qat_auth_state), | ||
580 | ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr); | ||
581 | ctx->auth_hw_state_enc = NULL; | ||
582 | out_free_dec: | ||
583 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | ||
584 | ctx->dec_cd, ctx->dec_cd_paddr); | ||
585 | ctx->dec_cd = NULL; | ||
586 | out_free_enc: | ||
587 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | ||
588 | ctx->enc_cd, ctx->enc_cd_paddr); | ||
589 | ctx->enc_cd = NULL; | ||
590 | return -ENOMEM; | ||
591 | } | ||
592 | |||
593 | static void qat_alg_free_bufl(struct qat_crypto_instance *inst, | ||
594 | struct qat_crypto_request *qat_req) | ||
595 | { | ||
596 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
597 | struct qat_alg_buf_list *bl = qat_req->buf.bl; | ||
598 | struct qat_alg_buf_list *blout = qat_req->buf.blout; | ||
599 | dma_addr_t blp = qat_req->buf.blp; | ||
600 | dma_addr_t blpout = qat_req->buf.bloutp; | ||
601 | size_t sz = qat_req->buf.sz; | ||
602 | int i, bufs = bl->num_bufs; | ||
603 | |||
604 | for (i = 0; i < bl->num_bufs; i++) | ||
605 | dma_unmap_single(dev, bl->bufers[i].addr, | ||
606 | bl->bufers[i].len, DMA_BIDIRECTIONAL); | ||
607 | |||
608 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); | ||
609 | kfree(bl); | ||
610 | if (blp != blpout) { | ||
611 | /* For an out-of-place operation, DMA unmap only the data */ | ||
612 | int bufless = bufs - blout->num_mapped_bufs; | ||
613 | |||
614 | for (i = bufless; i < bufs; i++) { | ||
615 | dma_unmap_single(dev, blout->bufers[i].addr, | ||
616 | blout->bufers[i].len, | ||
617 | DMA_BIDIRECTIONAL); | ||
618 | } | ||
619 | dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE); | ||
620 | kfree(blout); | ||
621 | } | ||
622 | } | ||
623 | |||
624 | static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | ||
625 | struct scatterlist *assoc, | ||
626 | struct scatterlist *sgl, | ||
627 | struct scatterlist *sglout, uint8_t *iv, | ||
628 | uint8_t ivlen, | ||
629 | struct qat_crypto_request *qat_req) | ||
630 | { | ||
631 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
632 | int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc); | ||
633 | struct qat_alg_buf_list *bufl; | ||
634 | struct qat_alg_buf_list *buflout = NULL; | ||
635 | dma_addr_t blp; | ||
636 | dma_addr_t bloutp = 0; | ||
637 | struct scatterlist *sg; | ||
638 | size_t sz = sizeof(struct qat_alg_buf_list) + | ||
639 | ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); | ||
640 | |||
641 | if (unlikely(!n)) | ||
642 | return -EINVAL; | ||
643 | |||
644 | bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node); | ||
645 | if (unlikely(!bufl)) | ||
646 | return -ENOMEM; | ||
647 | |||
648 | blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); | ||
649 | if (unlikely(dma_mapping_error(dev, blp))) | ||
650 | goto err; | ||
651 | |||
652 | for_each_sg(assoc, sg, assoc_n, i) { | ||
653 | bufl->bufers[bufs].addr = dma_map_single(dev, | ||
654 | sg_virt(sg), | ||
655 | sg->length, | ||
656 | DMA_BIDIRECTIONAL); | ||
657 | bufl->bufers[bufs].len = sg->length; | ||
658 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) | ||
659 | goto err; | ||
660 | bufs++; | ||
661 | } | ||
662 | bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, | ||
663 | DMA_BIDIRECTIONAL); | ||
664 | bufl->bufers[bufs].len = ivlen; | ||
665 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) | ||
666 | goto err; | ||
667 | bufs++; | ||
668 | |||
669 | for_each_sg(sgl, sg, n, i) { | ||
670 | int y = i + bufs; | ||
671 | |||
672 | bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), | ||
673 | sg->length, | ||
674 | DMA_BIDIRECTIONAL); | ||
675 | bufl->bufers[y].len = sg->length; | ||
676 | if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) | ||
677 | goto err; | ||
678 | } | ||
679 | bufl->num_bufs = n + bufs; | ||
680 | qat_req->buf.bl = bufl; | ||
681 | qat_req->buf.blp = blp; | ||
682 | qat_req->buf.sz = sz; | ||
683 | /* Handle out of place operation */ | ||
684 | if (sgl != sglout) { | ||
685 | struct qat_alg_buf *bufers; | ||
686 | |||
687 | buflout = kmalloc_node(sz, GFP_ATOMIC, | ||
688 | inst->accel_dev->numa_node); | ||
689 | if (unlikely(!buflout)) | ||
690 | goto err; | ||
691 | bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); | ||
692 | if (unlikely(dma_mapping_error(dev, bloutp))) | ||
693 | goto err; | ||
694 | bufers = buflout->bufers; | ||
695 | /* For an out-of-place operation, DMA map only the data and | ||
696 | * reuse the assoc and iv mappings */ | ||
697 | for (i = 0; i < bufs; i++) { | ||
698 | bufers[i].len = bufl->bufers[i].len; | ||
699 | bufers[i].addr = bufl->bufers[i].addr; | ||
700 | } | ||
701 | for_each_sg(sglout, sg, n, i) { | ||
702 | int y = i + bufs; | ||
703 | |||
704 | bufers[y].addr = dma_map_single(dev, sg_virt(sg), | ||
705 | sg->length, | ||
706 | DMA_BIDIRECTIONAL); | ||
707 | buflout->bufers[y].len = sg->length; | ||
708 | if (unlikely(dma_mapping_error(dev, bufers[y].addr))) | ||
709 | goto err; | ||
710 | } | ||
711 | buflout->num_bufs = n + bufs; | ||
712 | buflout->num_mapped_bufs = n; | ||
713 | qat_req->buf.blout = buflout; | ||
714 | qat_req->buf.bloutp = bloutp; | ||
715 | } else { | ||
716 | /* Otherwise set the src and dst to the same address */ | ||
717 | qat_req->buf.bloutp = qat_req->buf.blp; | ||
718 | } | ||
719 | return 0; | ||
720 | err: | ||
721 | dev_err(dev, "Failed to map buf for dma\n"); | ||
722 | for_each_sg(sgl, sg, n + bufs, i) { | ||
723 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) { | ||
724 | dma_unmap_single(dev, bufl->bufers[i].addr, | ||
725 | bufl->bufers[i].len, | ||
726 | DMA_BIDIRECTIONAL); | ||
727 | } | ||
728 | } | ||
729 | if (!dma_mapping_error(dev, blp)) | ||
730 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); | ||
731 | kfree(bufl); | ||
732 | if (sgl != sglout && buflout) { | ||
733 | for_each_sg(sglout, sg, n, i) { | ||
734 | int y = i + bufs; | ||
735 | |||
736 | if (!dma_mapping_error(dev, buflout->bufers[y].addr)) | ||
737 | dma_unmap_single(dev, buflout->bufers[y].addr, | ||
738 | buflout->bufers[y].len, | ||
739 | DMA_BIDIRECTIONAL); | ||
740 | } | ||
741 | if (!dma_mapping_error(dev, bloutp)) | ||
742 | dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE); | ||
743 | kfree(buflout); | ||
744 | } | ||
745 | return -ENOMEM; | ||
746 | } | ||
747 | |||
748 | void qat_alg_callback(void *resp) | ||
749 | { | ||
750 | struct icp_qat_fw_la_resp *qat_resp = resp; | ||
751 | struct qat_crypto_request *qat_req = | ||
752 | (void *)(__force long)qat_resp->opaque_data; | ||
753 | struct qat_alg_session_ctx *ctx = qat_req->ctx; | ||
754 | struct qat_crypto_instance *inst = ctx->inst; | ||
755 | struct aead_request *areq = qat_req->areq; | ||
756 | uint8_t stat_field = qat_resp->comn_resp.comn_status; | ||
757 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field); | ||
758 | |||
759 | qat_alg_free_bufl(inst, qat_req); | ||
760 | if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) | ||
761 | res = -EBADMSG; | ||
762 | areq->base.complete(&areq->base, res); | ||
763 | } | ||
764 | |||
765 | static int qat_alg_dec(struct aead_request *areq) | ||
766 | { | ||
767 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); | ||
768 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | ||
769 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
770 | struct qat_crypto_request *qat_req = aead_request_ctx(areq); | ||
771 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | ||
772 | struct icp_qat_fw_la_auth_req_params *auth_param; | ||
773 | struct icp_qat_fw_la_bulk_req *msg; | ||
774 | int digest_size = crypto_aead_crt(aead_tfm)->authsize; | ||
775 | int ret, ctr = 0; | ||
776 | |||
777 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, | ||
778 | areq->iv, AES_BLOCK_SIZE, qat_req); | ||
779 | if (unlikely(ret)) | ||
780 | return ret; | ||
781 | |||
782 | msg = &qat_req->req; | ||
783 | *msg = ctx->dec_fw_req_tmpl; | ||
784 | qat_req->ctx = ctx; | ||
785 | qat_req->areq = areq; | ||
786 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; | ||
787 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; | ||
788 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | ||
789 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | ||
790 | cipher_param->cipher_length = areq->cryptlen - digest_size; | ||
791 | cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; | ||
792 | memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); | ||
793 | auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); | ||
794 | auth_param->auth_off = 0; | ||
795 | auth_param->auth_len = areq->assoclen + | ||
796 | cipher_param->cipher_length + AES_BLOCK_SIZE; | ||
797 | do { | ||
798 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | ||
799 | } while (ret == -EAGAIN && ctr++ < 10); | ||
800 | |||
801 | if (ret == -EAGAIN) { | ||
802 | qat_alg_free_bufl(ctx->inst, qat_req); | ||
803 | return -EBUSY; | ||
804 | } | ||
805 | return -EINPROGRESS; | ||
806 | } | ||
807 | |||
808 | static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv, | ||
809 | int enc_iv) | ||
810 | { | ||
811 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); | ||
812 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | ||
813 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
814 | struct qat_crypto_request *qat_req = aead_request_ctx(areq); | ||
815 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | ||
816 | struct icp_qat_fw_la_auth_req_params *auth_param; | ||
817 | struct icp_qat_fw_la_bulk_req *msg; | ||
818 | int ret, ctr = 0; | ||
819 | |||
820 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, | ||
821 | iv, AES_BLOCK_SIZE, qat_req); | ||
822 | if (unlikely(ret)) | ||
823 | return ret; | ||
824 | |||
825 | msg = &qat_req->req; | ||
826 | *msg = ctx->enc_fw_req_tmpl; | ||
827 | qat_req->ctx = ctx; | ||
828 | qat_req->areq = areq; | ||
829 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; | ||
830 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; | ||
831 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | ||
832 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | ||
833 | auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); | ||
834 | |||
835 | if (enc_iv) { | ||
836 | cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE; | ||
837 | cipher_param->cipher_offset = areq->assoclen; | ||
838 | } else { | ||
839 | memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); | ||
840 | cipher_param->cipher_length = areq->cryptlen; | ||
841 | cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; | ||
842 | } | ||
843 | auth_param->auth_off = 0; | ||
844 | auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE; | ||
845 | |||
846 | do { | ||
847 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | ||
848 | } while (ret == -EAGAIN && ctr++ < 10); | ||
849 | |||
850 | if (ret == -EAGAIN) { | ||
851 | qat_alg_free_bufl(ctx->inst, qat_req); | ||
852 | return -EBUSY; | ||
853 | } | ||
854 | return -EINPROGRESS; | ||
855 | } | ||
856 | |||
857 | static int qat_alg_enc(struct aead_request *areq) | ||
858 | { | ||
859 | return qat_alg_enc_internal(areq, areq->iv, 0); | ||
860 | } | ||
861 | |||
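| /* IV generation: fill giv with the per-session salt, overwrite the last | ||
|  * 8 bytes with the big-endian request sequence number, then encrypt the | ||
|  * IV in-band (enc_iv = 1). | ||
|  */ | ||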
862 | static int qat_alg_genivenc(struct aead_givcrypt_request *req) | ||
863 | { | ||
864 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); | ||
865 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | ||
866 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
867 | __be64 seq; | ||
868 | |||
869 | memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); | ||
870 | seq = cpu_to_be64(req->seq); | ||
871 | memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), | ||
872 | &seq, sizeof(uint64_t)); | ||
873 | return qat_alg_enc_internal(&req->areq, req->giv, 1); | ||
874 | } | ||
875 | |||
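| /* Common tfm init: allocate a software shash for the selected hash and | ||
|  * size the per-request context for the firmware request. | ||
|  */ | ||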
876 | static int qat_alg_init(struct crypto_tfm *tfm, | ||
877 | enum icp_qat_hw_auth_algo hash, const char *hash_name) | ||
878 | { | ||
879 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
880 | |||
881 | memset(ctx, 0, sizeof(*ctx)); | ||
882 | ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); | ||
883 | if (IS_ERR(ctx->hash_tfm)) | ||
884 | return -EFAULT; | ||
885 | spin_lock_init(&ctx->lock); | ||
886 | ctx->qat_hash_alg = hash; | ||
887 | tfm->crt_aead.reqsize = sizeof(struct aead_request) + | ||
888 | sizeof(struct qat_crypto_request); | ||
889 | ctx->tfm = tfm; | ||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | static int qat_alg_sha1_init(struct crypto_tfm *tfm) | ||
894 | { | ||
895 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); | ||
896 | } | ||
897 | |||
898 | static int qat_alg_sha256_init(struct crypto_tfm *tfm) | ||
899 | { | ||
900 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); | ||
901 | } | ||
902 | |||
903 | static int qat_alg_sha512_init(struct crypto_tfm *tfm) | ||
904 | { | ||
905 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); | ||
906 | } | ||
907 | |||
908 | static void qat_alg_exit(struct crypto_tfm *tfm) | ||
909 | { | ||
910 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | ||
911 | struct qat_crypto_instance *inst = ctx->inst; | ||
912 | struct device *dev; | ||
913 | |||
914 | if (!IS_ERR(ctx->hash_tfm)) | ||
915 | crypto_free_shash(ctx->hash_tfm); | ||
916 | |||
917 | if (!inst) | ||
918 | return; | ||
919 | |||
920 | dev = &GET_DEV(inst->accel_dev); | ||
921 | if (ctx->enc_cd) | ||
922 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | ||
923 | ctx->enc_cd, ctx->enc_cd_paddr); | ||
924 | if (ctx->dec_cd) | ||
925 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | ||
926 | ctx->dec_cd, ctx->dec_cd_paddr); | ||
927 | if (ctx->auth_hw_state_enc) | ||
928 | dma_free_coherent(dev, sizeof(struct qat_auth_state), | ||
929 | ctx->auth_hw_state_enc, | ||
930 | ctx->auth_state_enc_paddr); | ||
931 | |||
932 | if (ctx->auth_hw_state_dec) | ||
933 | dma_free_coherent(dev, sizeof(struct qat_auth_state), | ||
934 | ctx->auth_hw_state_dec, | ||
935 | ctx->auth_state_dec_paddr); | ||
936 | |||
937 | qat_crypto_put_instance(inst); | ||
938 | } | ||
939 | |||
940 | static struct crypto_alg qat_algs[] = { { | ||
941 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | ||
942 | .cra_driver_name = "qat_aes_cbc_hmac_sha1", | ||
943 | .cra_priority = 4001, | ||
944 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
945 | .cra_blocksize = AES_BLOCK_SIZE, | ||
946 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | ||
947 | .cra_alignmask = 0, | ||
948 | .cra_type = &crypto_aead_type, | ||
949 | .cra_module = THIS_MODULE, | ||
950 | .cra_init = qat_alg_sha1_init, | ||
951 | .cra_exit = qat_alg_exit, | ||
952 | .cra_u = { | ||
953 | .aead = { | ||
954 | .setkey = qat_alg_setkey, | ||
955 | .decrypt = qat_alg_dec, | ||
956 | .encrypt = qat_alg_enc, | ||
957 | .givencrypt = qat_alg_genivenc, | ||
958 | .ivsize = AES_BLOCK_SIZE, | ||
959 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
960 | }, | ||
961 | }, | ||
962 | }, { | ||
963 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | ||
964 | .cra_driver_name = "qat_aes_cbc_hmac_sha256", | ||
965 | .cra_priority = 4001, | ||
966 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
967 | .cra_blocksize = AES_BLOCK_SIZE, | ||
968 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | ||
969 | .cra_alignmask = 0, | ||
970 | .cra_type = &crypto_aead_type, | ||
971 | .cra_module = THIS_MODULE, | ||
972 | .cra_init = qat_alg_sha256_init, | ||
973 | .cra_exit = qat_alg_exit, | ||
974 | .cra_u = { | ||
975 | .aead = { | ||
976 | .setkey = qat_alg_setkey, | ||
977 | .decrypt = qat_alg_dec, | ||
978 | .encrypt = qat_alg_enc, | ||
979 | .givencrypt = qat_alg_genivenc, | ||
980 | .ivsize = AES_BLOCK_SIZE, | ||
981 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
982 | }, | ||
983 | }, | ||
984 | }, { | ||
985 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | ||
986 | .cra_driver_name = "qat_aes_cbc_hmac_sha512", | ||
987 | .cra_priority = 4001, | ||
988 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
989 | .cra_blocksize = AES_BLOCK_SIZE, | ||
990 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | ||
991 | .cra_alignmask = 0, | ||
992 | .cra_type = &crypto_aead_type, | ||
993 | .cra_module = THIS_MODULE, | ||
994 | .cra_init = qat_alg_sha512_init, | ||
995 | .cra_exit = qat_alg_exit, | ||
996 | .cra_u = { | ||
997 | .aead = { | ||
998 | .setkey = qat_alg_setkey, | ||
999 | .decrypt = qat_alg_dec, | ||
1000 | .encrypt = qat_alg_enc, | ||
1001 | .givencrypt = qat_alg_genivenc, | ||
1002 | .ivsize = AES_BLOCK_SIZE, | ||
1003 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
1004 | }, | ||
1005 | }, | ||
1006 | } }; | ||
1007 | |||
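| /* Registration is refcounted in active_dev: only the first device | ||
|  * registers the algorithms and only the last one unregisters them. | ||
|  */ | ||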
1008 | int qat_algs_register(void) | ||
1009 | { | ||
1010 | if (atomic_add_return(1, &active_dev) == 1) { | ||
1011 | int i; | ||
1012 | |||
1013 | for (i = 0; i < ARRAY_SIZE(qat_algs); i++) | ||
1014 | qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD | | ||
1015 | CRYPTO_ALG_ASYNC; | ||
1016 | return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); | ||
1017 | } | ||
1018 | return 0; | ||
1019 | } | ||
1020 | |||
1021 | int qat_algs_unregister(void) | ||
1022 | { | ||
1023 | if (atomic_sub_return(1, &active_dev) == 0) | ||
1024 | return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); | ||
1025 | return 0; | ||
1026 | } | ||
1027 | |||
1028 | int qat_algs_init(void) | ||
1029 | { | ||
1030 | atomic_set(&active_dev, 0); | ||
1031 | crypto_get_default_rng(); | ||
1032 | return 0; | ||
1033 | } | ||
1034 | |||
1035 | void qat_algs_exit(void) | ||
1036 | { | ||
1037 | crypto_put_default_rng(); | ||
1038 | } | ||
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c new file mode 100644 index 000000000000..0d59bcb50de1 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_crypto.c | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include "adf_accel_devices.h" | ||
50 | #include "adf_common_drv.h" | ||
51 | #include "adf_transport.h" | ||
52 | #include "adf_cfg.h" | ||
53 | #include "adf_cfg_strings.h" | ||
54 | #include "qat_crypto.h" | ||
55 | #include "icp_qat_fw.h" | ||
56 | |||
57 | #define SEC ADF_KERNEL_SEC | ||
58 | |||
59 | static struct service_hndl qat_crypto; | ||
60 | |||
61 | void qat_crypto_put_instance(struct qat_crypto_instance *inst) | ||
62 | { | ||
63 | if (atomic_sub_return(1, &inst->refctr) == 0) | ||
64 | adf_dev_put(inst->accel_dev); | ||
65 | } | ||
66 | |||
67 | static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) | ||
68 | { | ||
69 | struct qat_crypto_instance *inst; | ||
70 | struct list_head *list_ptr, *tmp; | ||
71 | int i; | ||
72 | |||
73 | list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) { | ||
74 | inst = list_entry(list_ptr, struct qat_crypto_instance, list); | ||
75 | |||
76 | for (i = atomic_read(&inst->refctr); i > 0; i--) | ||
77 | qat_crypto_put_instance(inst); | ||
78 | |||
79 | if (inst->sym_tx) | ||
80 | adf_remove_ring(inst->sym_tx); | ||
81 | |||
82 | if (inst->sym_rx) | ||
83 | adf_remove_ring(inst->sym_rx); | ||
84 | |||
85 | if (inst->pke_tx) | ||
86 | adf_remove_ring(inst->pke_tx); | ||
87 | |||
88 | if (inst->pke_rx) | ||
89 | adf_remove_ring(inst->pke_rx); | ||
90 | |||
91 | if (inst->rnd_tx) | ||
92 | adf_remove_ring(inst->rnd_tx); | ||
93 | |||
94 | if (inst->rnd_rx) | ||
95 | adf_remove_ring(inst->rnd_rx); | ||
96 | |||
97 | list_del(list_ptr); | ||
98 | kfree(inst); | ||
99 | } | ||
100 | return 0; | ||
101 | } | ||
102 | |||
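| /* Pick the least-referenced instance on the requested NUMA node, | ||
|  * falling back to the first started device. | ||
|  */ | ||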
103 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node) | ||
104 | { | ||
105 | struct adf_accel_dev *accel_dev = NULL; | ||
106 | struct qat_crypto_instance *inst_best = NULL; | ||
107 | struct list_head *itr; | ||
108 | unsigned long best = ~0; | ||
109 | |||
110 | list_for_each(itr, adf_devmgr_get_head()) { | ||
111 | accel_dev = list_entry(itr, struct adf_accel_dev, list); | ||
112 | if (accel_dev->numa_node == node && adf_dev_started(accel_dev)) | ||
113 | break; | ||
114 | accel_dev = NULL; | ||
115 | } | ||
116 | if (!accel_dev) { | ||
117 | pr_err("QAT: Could not find device on give node\n"); | ||
118 | accel_dev = adf_devmgr_get_first(); | ||
119 | } | ||
120 | if (!accel_dev || !adf_dev_started(accel_dev)) | ||
121 | return NULL; | ||
122 | |||
123 | list_for_each(itr, &accel_dev->crypto_list) { | ||
124 | struct qat_crypto_instance *inst; | ||
125 | unsigned long cur; | ||
126 | |||
127 | inst = list_entry(itr, struct qat_crypto_instance, list); | ||
128 | cur = atomic_read(&inst->refctr); | ||
129 | if (best > cur) { | ||
130 | inst_best = inst; | ||
131 | best = cur; | ||
132 | } | ||
133 | } | ||
134 | if (inst_best) { | ||
135 | if (atomic_add_return(1, &inst_best->refctr) == 1) { | ||
136 | if (adf_dev_get(accel_dev)) { | ||
137 | atomic_dec(&inst_best->refctr); | ||
138 | pr_err("QAT: Could increment dev refctr\n"); | ||
139 | return NULL; | ||
140 | } | ||
141 | } | ||
142 | } | ||
143 | return inst_best; | ||
144 | } | ||
145 | |||
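| /* Create the configured number of instances; each gets tx/rx ring pairs | ||
|  * for sym, rnd and pke traffic per the device configuration. | ||
|  */ | ||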
146 | static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) | ||
147 | { | ||
148 | int i; | ||
149 | unsigned long bank; | ||
150 | unsigned long num_inst, num_msg_sym, num_msg_asym; | ||
151 | int msg_size; | ||
152 | struct qat_crypto_instance *inst; | ||
153 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
154 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | ||
155 | |||
156 | INIT_LIST_HEAD(&accel_dev->crypto_list); | ||
157 | strlcpy(key, ADF_NUM_CY, sizeof(key)); | ||
158 | |||
159 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | ||
160 | return -EFAULT; | ||
161 | |||
162 | if (kstrtoul(val, 0, &num_inst)) | ||
163 | return -EFAULT; | ||
164 | |||
165 | for (i = 0; i < num_inst; i++) { | ||
166 | inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, | ||
167 | accel_dev->numa_node); | ||
168 | if (!inst) | ||
169 | goto err; | ||
170 | |||
171 | list_add_tail(&inst->list, &accel_dev->crypto_list); | ||
172 | inst->id = i; | ||
173 | atomic_set(&inst->refctr, 0); | ||
174 | inst->accel_dev = accel_dev; | ||
175 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); | ||
176 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | ||
177 | goto err; | ||
178 | |||
179 | if (kstrtoul(val, 10, &bank)) | ||
180 | goto err; | ||
181 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); | ||
182 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | ||
183 | goto err; | ||
184 | |||
185 | if (kstrtoul(val, 10, &num_msg_sym)) | ||
186 | goto err; | ||
187 | num_msg_sym = num_msg_sym >> 1; | ||
188 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); | ||
189 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | ||
190 | goto err; | ||
191 | |||
192 | if (kstrtoul(val, 10, &num_msg_asym)) | ||
193 | goto err; | ||
194 | num_msg_asym = num_msg_asym >> 1; | ||
195 | |||
196 | msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ; | ||
197 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); | ||
198 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym, | ||
199 | msg_size, key, NULL, 0, &inst->sym_tx)) | ||
200 | goto err; | ||
201 | |||
202 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); | ||
203 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
204 | msg_size, key, NULL, 0, &inst->rnd_tx)) | ||
205 | goto err; | ||
206 | |||
207 | msg_size = msg_size >> 1; | ||
208 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); | ||
209 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
210 | msg_size, key, NULL, 0, &inst->pke_tx)) | ||
211 | goto err; | ||
212 | |||
213 | msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ; | ||
214 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); | ||
215 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym, | ||
216 | msg_size, key, qat_alg_callback, 0, | ||
217 | &inst->sym_rx)) | ||
218 | goto err; | ||
219 | |||
220 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); | ||
221 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
222 | msg_size, key, qat_alg_callback, 0, | ||
223 | &inst->rnd_rx)) | ||
224 | goto err; | ||
225 | |||
226 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); | ||
227 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
228 | msg_size, key, qat_alg_callback, 0, | ||
229 | &inst->pke_rx)) | ||
230 | goto err; | ||
231 | } | ||
232 | return 0; | ||
233 | err: | ||
234 | qat_crypto_free_instances(accel_dev); | ||
235 | return -ENOMEM; | ||
236 | } | ||
237 | |||
238 | static int qat_crypto_init(struct adf_accel_dev *accel_dev) | ||
239 | { | ||
240 | if (qat_crypto_create_instances(accel_dev)) | ||
241 | return -EFAULT; | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev) | ||
247 | { | ||
248 | return qat_crypto_free_instances(accel_dev); | ||
249 | } | ||
250 | |||
251 | static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev, | ||
252 | enum adf_event event) | ||
253 | { | ||
254 | int ret; | ||
255 | |||
256 | switch (event) { | ||
257 | case ADF_EVENT_INIT: | ||
258 | ret = qat_crypto_init(accel_dev); | ||
259 | break; | ||
260 | case ADF_EVENT_SHUTDOWN: | ||
261 | ret = qat_crypto_shutdown(accel_dev); | ||
262 | break; | ||
263 | case ADF_EVENT_RESTARTING: | ||
264 | case ADF_EVENT_RESTARTED: | ||
265 | case ADF_EVENT_START: | ||
266 | case ADF_EVENT_STOP: | ||
267 | default: | ||
268 | ret = 0; | ||
269 | } | ||
270 | return ret; | ||
271 | } | ||
272 | |||
273 | int qat_crypto_register(void) | ||
274 | { | ||
275 | memset(&qat_crypto, 0, sizeof(qat_crypto)); | ||
276 | qat_crypto.event_hld = qat_crypto_event_handler; | ||
277 | qat_crypto.name = "qat_crypto"; | ||
278 | return adf_service_register(&qat_crypto); | ||
279 | } | ||
280 | |||
281 | int qat_crypto_unregister(void) | ||
282 | { | ||
283 | return adf_service_unregister(&qat_crypto); | ||
284 | } | ||
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h new file mode 100644 index 000000000000..ab8468d11ddb --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_crypto.h | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef _QAT_CRYPTO_INSTANCE_H_ | ||
48 | #define _QAT_CRYPTO_INSTANCE_H_ | ||
49 | |||
50 | #include <linux/list.h> | ||
51 | #include <linux/slab.h> | ||
52 | #include "adf_accel_devices.h" | ||
53 | #include "icp_qat_fw_la.h" | ||
54 | |||
55 | struct qat_crypto_instance { | ||
56 | struct adf_etr_ring_data *sym_tx; | ||
57 | struct adf_etr_ring_data *sym_rx; | ||
58 | struct adf_etr_ring_data *pke_tx; | ||
59 | struct adf_etr_ring_data *pke_rx; | ||
60 | struct adf_etr_ring_data *rnd_tx; | ||
61 | struct adf_etr_ring_data *rnd_rx; | ||
62 | struct adf_accel_dev *accel_dev; | ||
63 | struct list_head list; | ||
64 | unsigned long state; | ||
65 | int id; | ||
66 | atomic_t refctr; | ||
67 | }; | ||
68 | |||
69 | struct qat_crypto_request_buffs { | ||
70 | struct qat_alg_buf_list *bl; | ||
71 | dma_addr_t blp; | ||
72 | struct qat_alg_buf_list *blout; | ||
73 | dma_addr_t bloutp; | ||
74 | size_t sz; | ||
75 | }; | ||
76 | |||
77 | struct qat_crypto_request { | ||
78 | struct icp_qat_fw_la_bulk_req req; | ||
79 | struct qat_alg_session_ctx *ctx; | ||
80 | struct aead_request *areq; | ||
81 | struct qat_crypto_request_buffs buf; | ||
82 | }; | ||
83 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c new file mode 100644 index 000000000000..9b8a31521ff3 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_hal.c | |||
@@ -0,0 +1,1393 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/slab.h> | ||
48 | |||
49 | #include "adf_accel_devices.h" | ||
50 | #include "adf_common_drv.h" | ||
51 | #include "icp_qat_hal.h" | ||
52 | #include "icp_qat_uclo.h" | ||
53 | |||
54 | #define BAD_REGADDR 0xffff | ||
55 | #define MAX_RETRY_TIMES 10000 | ||
56 | #define INIT_CTX_ARB_VALUE 0x0 | ||
57 | #define INIT_CTX_ENABLE_VALUE 0x0 | ||
58 | #define INIT_PC_VALUE 0x0 | ||
59 | #define INIT_WAKEUP_EVENTS_VALUE 0x1 | ||
60 | #define INIT_SIG_EVENTS_VALUE 0x1 | ||
61 | #define INIT_CCENABLE_VALUE 0x2000 | ||
62 | #define RST_CSR_QAT_LSB 20 | ||
63 | #define RST_CSR_AE_LSB 0 | ||
64 | #define MC_TIMESTAMP_ENABLE (0x1 << 7) | ||
65 | |||
66 | #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \ | ||
67 | (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \ | ||
68 | (~(1 << CE_REG_PAR_ERR_BITPOS))) | ||
69 | #define INSERT_IMMED_GPRA_CONST(inst, const_val) \ | ||
70 | (inst = ((inst & 0xFFFF00C03FFull) | \ | ||
71 | ((((const_val) << 12) & 0x0FF00000ull) | \ | ||
72 | (((const_val) << 10) & 0x0003FC00ull)))) | ||
73 | #define INSERT_IMMED_GPRB_CONST(inst, const_val) \ | ||
74 | (inst = ((inst & 0xFFFF00FFF00ull) | \ | ||
75 | ((((const_val) << 12) & 0x0FF00000ull) | \ | ||
76 | (((const_val) << 0) & 0x000000FFull)))) | ||
77 | |||
78 | #define AE(handle, ae) handle->hal_handle->aes[ae] | ||
79 | |||
80 | static const uint64_t inst_4b[] = { | ||
81 | 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, | ||
82 | 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, | ||
83 | 0x0A021000000ull | ||
84 | }; | ||
85 | |||
86 | static const uint64_t inst[] = { | ||
87 | 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, | ||
88 | 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, | ||
89 | 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, | ||
90 | 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, | ||
91 | 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull, | ||
92 | 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, | ||
93 | 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, | ||
94 | 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, | ||
95 | 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, | ||
96 | 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, | ||
97 | 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, | ||
98 | 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, | ||
99 | 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, | ||
100 | 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, | ||
101 | 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, | ||
102 | 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, | ||
103 | 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, | ||
104 | 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, | ||
105 | 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, | ||
106 | 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, | ||
107 | 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, | ||
108 | 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull, | ||
109 | }; | ||
110 | |||
111 | void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, | ||
112 | unsigned char ae, unsigned int ctx_mask) | ||
113 | { | ||
114 | AE(handle, ae).live_ctx_mask = ctx_mask; | ||
115 | } | ||
116 | |||
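| /* AE CSR accessors: retry the access until the LCS_STATUS bit in | ||
|  * LOCAL_CSR_STATUS reads clear. | ||
|  */ | ||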
117 | #define CSR_RETRY_TIMES 500 | ||
118 | static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, | ||
119 | unsigned char ae, unsigned int csr, | ||
120 | unsigned int *value) | ||
121 | { | ||
122 | unsigned int iterations = CSR_RETRY_TIMES; | ||
123 | |||
124 | do { | ||
125 | *value = GET_AE_CSR(handle, ae, csr); | ||
126 | if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) | ||
127 | return 0; | ||
128 | } while (iterations--); | ||
129 | |||
130 | pr_err("QAT: Read CSR timeout\n"); | ||
131 | return -EFAULT; | ||
132 | } | ||
133 | |||
134 | static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, | ||
135 | unsigned char ae, unsigned int csr, | ||
136 | unsigned int value) | ||
137 | { | ||
138 | unsigned int iterations = CSR_RETRY_TIMES; | ||
139 | |||
140 | do { | ||
141 | SET_AE_CSR(handle, ae, csr, value); | ||
142 | if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) | ||
143 | return 0; | ||
144 | } while (iterations--); | ||
145 | |||
146 | pr_err("QAT: Write CSR Timeout\n"); | ||
147 | return -EFAULT; | ||
148 | } | ||
149 | |||
150 | static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle, | ||
151 | unsigned char ae, unsigned char ctx, | ||
152 | unsigned int *events) | ||
153 | { | ||
154 | unsigned int cur_ctx; | ||
155 | |||
156 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
157 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
158 | qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); | ||
159 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
160 | } | ||
161 | |||
162 | static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, | ||
163 | unsigned char ae, unsigned int cycles, | ||
164 | int chk_inactive) | ||
165 | { | ||
166 | unsigned int base_cnt = 0, cur_cnt = 0; | ||
167 | unsigned int csr = (1 << ACS_ABO_BITPOS); | ||
168 | int times = MAX_RETRY_TIMES; | ||
169 | int elapsed_cycles = 0; | ||
170 | |||
171 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt); | ||
172 | base_cnt &= 0xffff; | ||
173 | while ((int)cycles > elapsed_cycles && times--) { | ||
174 | if (chk_inactive) | ||
175 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr); | ||
176 | |||
177 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt); | ||
178 | cur_cnt &= 0xffff; | ||
179 | elapsed_cycles = cur_cnt - base_cnt; | ||
180 | |||
181 | if (elapsed_cycles < 0) | ||
182 | elapsed_cycles += 0x10000; | ||
183 | |||
184 | /* ensure at least 8 cycles have elapsed in wait_cycles */ | ||
185 | if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) | ||
186 | return 0; | ||
187 | } | ||
188 | if (times < 0) { | ||
189 | pr_err("QAT: wait_num_cycles timed out\n"); | ||
190 | return -EFAULT; | ||
191 | } | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | #define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit))) | ||
196 | #define SET_BIT(wrd, bit) ((wrd) | (1 << (bit))) | ||
197 | |||
198 | int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, | ||
199 | unsigned char ae, unsigned char mode) | ||
200 | { | ||
201 | unsigned int csr, new_csr; | ||
202 | |||
203 | if ((mode != 4) && (mode != 8)) { | ||
204 | pr_err("QAT: bad ctx mode=%d\n", mode); | ||
205 | return -EINVAL; | ||
206 | } | ||
207 | |||
208 | /* Set the acceleration engine context mode to either four or eight contexts */ | ||
209 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | ||
210 | csr = IGNORE_W1C_MASK & csr; | ||
211 | new_csr = (mode == 4) ? | ||
212 | SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) : | ||
213 | CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS); | ||
214 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); | ||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, | ||
219 | unsigned char ae, unsigned char mode) | ||
220 | { | ||
221 | unsigned int csr, new_csr; | ||
222 | |||
223 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | ||
224 | csr &= IGNORE_W1C_MASK; | ||
225 | |||
226 | new_csr = (mode) ? | ||
227 | SET_BIT(csr, CE_NN_MODE_BITPOS) : | ||
228 | CLR_BIT(csr, CE_NN_MODE_BITPOS); | ||
229 | |||
230 | if (new_csr != csr) | ||
231 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, | ||
237 | unsigned char ae, enum icp_qat_uof_regtype lm_type, | ||
238 | unsigned char mode) | ||
239 | { | ||
240 | unsigned int csr, new_csr; | ||
241 | |||
242 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | ||
243 | csr &= IGNORE_W1C_MASK; | ||
244 | switch (lm_type) { | ||
245 | case ICP_LMEM0: | ||
246 | new_csr = (mode) ? | ||
247 | SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) : | ||
248 | CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS); | ||
249 | break; | ||
250 | case ICP_LMEM1: | ||
251 | new_csr = (mode) ? | ||
252 | SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) : | ||
253 | CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS); | ||
254 | break; | ||
255 | default: | ||
256 | pr_err("QAT: lmType = 0x%x\n", lm_type); | ||
257 | return -EINVAL; | ||
258 | } | ||
259 | |||
260 | if (new_csr != csr) | ||
261 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr); | ||
262 | return 0; | ||
263 | } | ||
264 | |||
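| /* Encode a register type/number pair into its CSR address; returns | ||
|  * BAD_REGADDR for unknown types. | ||
|  */ | ||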
265 | static unsigned short qat_hal_get_reg_addr(unsigned int type, | ||
266 | unsigned short reg_num) | ||
267 | { | ||
268 | unsigned short reg_addr; | ||
269 | |||
270 | switch (type) { | ||
271 | case ICP_GPA_ABS: | ||
272 | case ICP_GPB_ABS: | ||
273 | reg_addr = 0x80 | (reg_num & 0x7f); | ||
274 | break; | ||
275 | case ICP_GPA_REL: | ||
276 | case ICP_GPB_REL: | ||
277 | reg_addr = reg_num & 0x1f; | ||
278 | break; | ||
279 | case ICP_SR_RD_REL: | ||
280 | case ICP_SR_WR_REL: | ||
281 | case ICP_SR_REL: | ||
282 | reg_addr = 0x180 | (reg_num & 0x1f); | ||
283 | break; | ||
284 | case ICP_SR_ABS: | ||
285 | reg_addr = 0x140 | ((reg_num & 0x3) << 1); | ||
286 | break; | ||
287 | case ICP_DR_RD_REL: | ||
288 | case ICP_DR_WR_REL: | ||
289 | case ICP_DR_REL: | ||
290 | reg_addr = 0x1c0 | (reg_num & 0x1f); | ||
291 | break; | ||
292 | case ICP_DR_ABS: | ||
293 | reg_addr = 0x100 | ((reg_num & 0x3) << 1); | ||
294 | break; | ||
295 | case ICP_NEIGH_REL: | ||
296 | reg_addr = 0x280 | (reg_num & 0x1f); | ||
297 | break; | ||
298 | case ICP_LMEM0: | ||
299 | reg_addr = 0x200; | ||
300 | break; | ||
301 | case ICP_LMEM1: | ||
302 | reg_addr = 0x220; | ||
303 | break; | ||
304 | case ICP_NO_DEST: | ||
305 | reg_addr = 0x300 | (reg_num & 0xff); | ||
306 | break; | ||
307 | default: | ||
308 | reg_addr = BAD_REGADDR; | ||
309 | break; | ||
310 | } | ||
311 | return reg_addr; | ||
312 | } | ||
313 | |||
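| /* Put all enabled AEs and accel slices into reset via ICP_RESET. */ | ||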
314 | void qat_hal_reset(struct icp_qat_fw_loader_handle *handle) | ||
315 | { | ||
316 | unsigned int ae_reset_csr; | ||
317 | |||
318 | ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET); | ||
319 | ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB; | ||
320 | ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB; | ||
321 | SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr); | ||
322 | } | ||
323 | |||
324 | static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle, | ||
325 | unsigned char ae, unsigned int ctx_mask, | ||
326 | unsigned int ae_csr, unsigned int csr_val) | ||
327 | { | ||
328 | unsigned int ctx, cur_ctx; | ||
329 | |||
330 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
331 | |||
332 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | ||
333 | if (!(ctx_mask & (1 << ctx))) | ||
334 | continue; | ||
335 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
336 | qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val); | ||
337 | } | ||
338 | |||
339 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
340 | } | ||
341 | |||
342 | static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, | ||
343 | unsigned char ae, unsigned char ctx, | ||
344 | unsigned int ae_csr, unsigned int *csr_val) | ||
345 | { | ||
346 | unsigned int cur_ctx; | ||
347 | |||
348 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
349 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
350 | qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val); | ||
351 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
352 | } | ||
353 | |||
354 | static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, | ||
355 | unsigned char ae, unsigned int ctx_mask, | ||
356 | unsigned int events) | ||
357 | { | ||
358 | unsigned int ctx, cur_ctx; | ||
359 | |||
360 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
361 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | ||
362 | if (!(ctx_mask & (1 << ctx))) | ||
363 | continue; | ||
364 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
365 | qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events); | ||
366 | } | ||
367 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
368 | } | ||
369 | |||
370 | static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle, | ||
371 | unsigned char ae, unsigned int ctx_mask, | ||
372 | unsigned int events) | ||
373 | { | ||
374 | unsigned int ctx, cur_ctx; | ||
375 | |||
376 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | ||
377 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | ||
378 | if (!(ctx_mask & (1 << ctx))) | ||
379 | continue; | ||
380 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | ||
381 | qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, | ||
382 | events); | ||
383 | } | ||
384 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | ||
385 | } | ||
386 | |||
387 | static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) | ||
388 | { | ||
389 | unsigned int base_cnt, cur_cnt; | ||
390 | unsigned char ae; | ||
391 | int times = MAX_RETRY_TIMES; | ||
392 | |||
393 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
394 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
395 | continue; | ||
396 | |||
397 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, | ||
398 | (unsigned int *)&base_cnt); | ||
399 | base_cnt &= 0xffff; | ||
400 | |||
401 | do { | ||
402 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, | ||
403 | (unsigned int *)&cur_cnt); | ||
404 | cur_cnt &= 0xffff; | ||
405 | } while (times-- && (cur_cnt == base_cnt)); | ||
406 | |||
407 | if (times < 0) { | ||
408 | pr_err("QAT: AE%d is inactive\n", ae); | ||
409 | return -EFAULT; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) | ||
417 | { | ||
418 | unsigned int misc_ctl; | ||
419 | unsigned char ae; | ||
420 | |||
421 | /* stop the timestamp timers */ | ||
422 | misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL); | ||
423 | if (misc_ctl & MC_TIMESTAMP_ENABLE) | ||
424 | SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl & | ||
425 | (~MC_TIMESTAMP_ENABLE)); | ||
426 | |||
427 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
428 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
429 | continue; | ||
430 | qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); | ||
431 | qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); | ||
432 | } | ||
433 | /* start timestamp timers */ | ||
434 | SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE); | ||
435 | } | ||
436 | |||
437 | #define ESRAM_AUTO_TINIT (1<<2) | ||
438 | #define ESRAM_AUTO_TINIT_DONE (1<<3) | ||
439 | #define ESRAM_AUTO_INIT_USED_CYCLES (1640) | ||
440 | #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C | ||
441 | static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) | ||
442 | { | ||
443 | void __iomem *csr_addr = handle->hal_ep_csr_addr_v + | ||
444 | ESRAM_AUTO_INIT_CSR_OFFSET; | ||
445 | unsigned int csr_val; int times = 30; | ||
446 | |||
447 | csr_val = ADF_CSR_RD(csr_addr, 0); | ||
448 | if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE)) | ||
449 | return 0; | ||
450 | |||
451 | csr_val = ADF_CSR_RD(csr_addr, 0); | ||
452 | csr_val |= ESRAM_AUTO_TINIT; | ||
453 | ADF_CSR_WR(csr_addr, 0, csr_val); | ||
454 | |||
455 | do { | ||
456 | qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0); | ||
457 | csr_val = ADF_CSR_RD(csr_addr, 0); | ||
458 | } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--); | ||
459 | if (times < 0) { | ||
460 | pr_err("QAT: Failed to init eSRAM!\n"); | ||
461 | return -EFAULT; | ||
462 | } | ||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | #define SHRAM_INIT_CYCLES 2060 | ||
467 | int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) | ||
468 | { | ||
469 | unsigned int ae_reset_csr; | ||
470 | unsigned char ae; | ||
471 | unsigned int clk_csr; | ||
472 | unsigned int times = 100; | ||
473 | unsigned int csr; | ||
474 | |||
475 | /* write to the reset csr */ | ||
476 | ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET); | ||
477 | ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB); | ||
478 | ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB); | ||
479 | do { | ||
480 | SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr); | ||
481 | if (!(times--)) | ||
482 | goto out_err; | ||
483 | csr = GET_GLB_CSR(handle, ICP_RESET); | ||
484 | } while ((handle->hal_handle->ae_mask | | ||
485 | (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr); | ||
486 | /* enable clock */ | ||
487 | clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE); | ||
488 | clk_csr |= handle->hal_handle->ae_mask << 0; | ||
489 | clk_csr |= handle->hal_handle->slice_mask << 20; | ||
490 | SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr); | ||
491 | if (qat_hal_check_ae_alive(handle)) | ||
492 | goto out_err; | ||
493 | |||
494 | /* Set undefined power-up/reset states to reasonable default values */ | ||
495 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
496 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
497 | continue; | ||
498 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, | ||
499 | INIT_CTX_ENABLE_VALUE); | ||
500 | qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, | ||
501 | CTX_STS_INDIRECT, | ||
502 | handle->hal_handle->upc_mask & | ||
503 | INIT_PC_VALUE); | ||
504 | qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); | ||
505 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); | ||
506 | qat_hal_put_wakeup_event(handle, ae, | ||
507 | ICP_QAT_UCLO_AE_ALL_CTX, | ||
508 | INIT_WAKEUP_EVENTS_VALUE); | ||
509 | qat_hal_put_sig_event(handle, ae, | ||
510 | ICP_QAT_UCLO_AE_ALL_CTX, | ||
511 | INIT_SIG_EVENTS_VALUE); | ||
512 | } | ||
513 | if (qat_hal_init_esram(handle)) | ||
514 | goto out_err; | ||
515 | if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0)) | ||
516 | goto out_err; | ||
517 | qat_hal_reset_timestamp(handle); | ||
518 | |||
519 | return 0; | ||
520 | out_err: | ||
521 | pr_err("QAT: failed to get device out of reset\n"); | ||
522 | return -EFAULT; | ||
523 | } | ||
524 | |||
525 | static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, | ||
526 | unsigned char ae, unsigned int ctx_mask) | ||
527 | { | ||
528 | unsigned int ctx; | ||
529 | |||
530 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); | ||
531 | ctx &= IGNORE_W1C_MASK & | ||
532 | (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); | ||
533 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); | ||
534 | } | ||
535 | |||
536 | static uint64_t qat_hal_parity_64bit(uint64_t word) | ||
537 | { | ||
538 | word ^= word >> 1; | ||
539 | word ^= word >> 2; | ||
540 | word ^= word >> 4; | ||
541 | word ^= word >> 8; | ||
542 | word ^= word >> 16; | ||
543 | word ^= word >> 32; | ||
544 | return word & 1; | ||
545 | } | ||
546 | |||
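| /* Each microword carries 7 ECC bits in [0x32:0x2c], one parity bit per | ||
|  * fixed data-bit mask. | ||
|  */ | ||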
547 | static uint64_t qat_hal_set_uword_ecc(uint64_t uword) | ||
548 | { | ||
549 | uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, | ||
550 | bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, | ||
551 | bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, | ||
552 | bit6_mask = 0xdaf69a46910ULL; | ||
553 | |||
554 | /* clear the ecc bits */ | ||
555 | uword &= ~(0x7fULL << 0x2C); | ||
556 | uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C; | ||
557 | uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D; | ||
558 | uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E; | ||
559 | uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F; | ||
560 | uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30; | ||
561 | uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31; | ||
562 | uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32; | ||
563 | return uword; | ||
564 | } | ||
565 | |||
566 | void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, | ||
567 | unsigned char ae, unsigned int uaddr, | ||
568 | unsigned int words_num, uint64_t *uword) | ||
569 | { | ||
570 | unsigned int ustore_addr; | ||
571 | unsigned int i; | ||
572 | |||
573 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | ||
574 | uaddr |= UA_ECS; | ||
575 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
576 | for (i = 0; i < words_num; i++) { | ||
577 | unsigned int uwrd_lo, uwrd_hi; | ||
578 | uint64_t tmp; | ||
579 | |||
580 | tmp = qat_hal_set_uword_ecc(uword[i]); | ||
581 | uwrd_lo = (unsigned int)(tmp & 0xffffffff); | ||
582 | uwrd_hi = (unsigned int)(tmp >> 0x20); | ||
583 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); | ||
584 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); | ||
585 | } | ||
586 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | ||
587 | } | ||
588 | |||
589 | static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, | ||
590 | unsigned char ae, unsigned int ctx_mask) | ||
591 | { | ||
592 | unsigned int ctx; | ||
593 | |||
594 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); | ||
595 | ctx &= IGNORE_W1C_MASK; | ||
596 | ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; | ||
597 | ctx |= (ctx_mask << CE_ENABLE_BITPOS); | ||
598 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); | ||
599 | } | ||
600 | |||
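| /* Clear GPRs by loading the canned inst[] microprogram into every | ||
|  * enabled AE, running all contexts, then restoring the saved state. | ||
|  */ | ||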
601 | static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | ||
602 | { | ||
603 | unsigned char ae; | ||
604 | unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; | ||
605 | int times = MAX_RETRY_TIMES; | ||
606 | unsigned int csr_val = 0; | ||
607 | unsigned short reg; | ||
608 | unsigned int savctx = 0; | ||
609 | int ret = 0; | ||
610 | |||
611 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
612 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
613 | continue; | ||
614 | for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { | ||
615 | qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, | ||
616 | reg, 0); | ||
617 | qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, | ||
618 | reg, 0); | ||
619 | } | ||
620 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); | ||
621 | csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); | ||
622 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); | ||
623 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val); | ||
624 | csr_val &= IGNORE_W1C_MASK; | ||
625 | csr_val |= CE_NN_MODE; | ||
626 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); | ||
627 | qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst), | ||
628 | (uint64_t *)inst); | ||
629 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, | ||
630 | handle->hal_handle->upc_mask & | ||
631 | INIT_PC_VALUE); | ||
632 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | ||
633 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); | ||
634 | qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); | ||
635 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, | ||
636 | CTX_SIG_EVENTS_INDIRECT, 0); | ||
637 | qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); | ||
638 | qat_hal_enable_ctx(handle, ae, ctx_mask); | ||
639 | } | ||
640 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
641 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
642 | continue; | ||
643 | /* wait for AE to finish */ | ||
644 | do { | ||
645 | ret = qat_hal_wait_cycles(handle, ae, 20, 1); | ||
646 | } while (ret && times--); | ||
647 | |||
648 | if (times < 0) { | ||
649 | pr_err("QAT: clear GPR of AE %d failed\n", ae); | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | qat_hal_disable_ctx(handle, ae, ctx_mask); | ||
653 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, | ||
654 | savctx & ACS_ACNO); | ||
655 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, | ||
656 | INIT_CTX_ENABLE_VALUE); | ||
657 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, | ||
658 | handle->hal_handle->upc_mask & | ||
659 | INIT_PC_VALUE); | ||
660 | qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE); | ||
661 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE); | ||
662 | qat_hal_put_wakeup_event(handle, ae, ctx_mask, | ||
663 | INIT_WAKEUP_EVENTS_VALUE); | ||
664 | qat_hal_put_sig_event(handle, ae, ctx_mask, | ||
665 | INIT_SIG_EVENTS_VALUE); | ||
666 | } | ||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | #define ICP_DH895XCC_AE_OFFSET 0x20000 | ||
671 | #define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) | ||
672 | #define LOCAL_TO_XFER_REG_OFFSET 0x800 | ||
673 | #define ICP_DH895XCC_EP_OFFSET 0x3a000 | ||
674 | #define ICP_DH895XCC_PMISC_BAR 1 | ||
675 | int qat_hal_init(struct adf_accel_dev *accel_dev) | ||
676 | { | ||
677 | unsigned char ae; | ||
678 | unsigned int max_en_ae_id = 0; | ||
679 | struct icp_qat_fw_loader_handle *handle; | ||
680 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; | ||
681 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
682 | struct adf_bar *bar = &pci_info->pci_bars[ICP_DH895XCC_PMISC_BAR]; | ||
683 | |||
684 | handle = kzalloc(sizeof(*handle), GFP_KERNEL); | ||
685 | if (!handle) | ||
686 | return -ENOMEM; | ||
687 | |||
688 | handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr + | ||
689 | ICP_DH895XCC_CAP_OFFSET; | ||
690 | handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr + | ||
691 | ICP_DH895XCC_AE_OFFSET; | ||
692 | handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET; | ||
693 | handle->hal_cap_ae_local_csr_addr_v = | ||
694 | handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; | ||
695 | |||
696 | handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); | ||
697 | if (!handle->hal_handle) | ||
698 | goto out_hal_handle; | ||
699 | handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid; | ||
700 | handle->hal_handle->ae_mask = hw_data->ae_mask; | ||
701 | handle->hal_handle->slice_mask = hw_data->accel_mask; | ||
702 | /* create AE objects */ | ||
703 | handle->hal_handle->upc_mask = 0x1ffff; | ||
704 | handle->hal_handle->max_ustore = 0x4000; | ||
705 | for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) { | ||
706 | if (!(hw_data->ae_mask & (1 << ae))) | ||
707 | continue; | ||
708 | handle->hal_handle->aes[ae].free_addr = 0; | ||
709 | handle->hal_handle->aes[ae].free_size = | ||
710 | handle->hal_handle->max_ustore; | ||
711 | handle->hal_handle->aes[ae].ustore_size = | ||
712 | handle->hal_handle->max_ustore; | ||
713 | handle->hal_handle->aes[ae].live_ctx_mask = | ||
714 | ICP_QAT_UCLO_AE_ALL_CTX; | ||
715 | max_en_ae_id = ae; | ||
716 | } | ||
717 | handle->hal_handle->ae_max_num = max_en_ae_id + 1; | ||
718 | /* take all AEs out of reset */ | ||
719 | if (qat_hal_clr_reset(handle)) { | ||
720 | pr_err("QAT: qat_hal_clr_reset error\n"); | ||
721 | goto out_err; | ||
722 | } | ||
723 | if (qat_hal_clear_gpr(handle)) | ||
724 | goto out_err; | ||
725 | /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ | ||
726 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
727 | unsigned int csr_val = 0; | ||
728 | |||
729 | if (!(hw_data->ae_mask & (1 << ae))) | ||
730 | continue; | ||
731 | qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); | ||
732 | csr_val |= 0x1; | ||
733 | qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); | ||
734 | } | ||
735 | accel_dev->fw_loader->fw_loader = handle; | ||
736 | return 0; | ||
737 | |||
738 | out_err: | ||
739 | kfree(handle->hal_handle); | ||
740 | out_hal_handle: | ||
741 | kfree(handle); | ||
742 | return -EFAULT; | ||
743 | } | ||
744 | |||
745 | void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) | ||
746 | { | ||
747 | if (!handle) | ||
748 | return; | ||
749 | kfree(handle->hal_handle); | ||
750 | kfree(handle); | ||
751 | } | ||
752 | |||
753 | void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
754 | unsigned int ctx_mask) | ||
755 | { | ||
756 | qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & | ||
757 | ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); | ||
758 | qat_hal_enable_ctx(handle, ae, ctx_mask); | ||
759 | } | ||
760 | |||
761 | void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | ||
762 | unsigned int ctx_mask) | ||
763 | { | ||
764 | qat_hal_disable_ctx(handle, ae, ctx_mask); | ||
765 | } | ||
766 | |||
767 | void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, | ||
768 | unsigned char ae, unsigned int ctx_mask, unsigned int upc) | ||
769 | { | ||
770 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, | ||
771 | handle->hal_handle->upc_mask & upc); | ||
772 | } | ||
773 | |||
774 | static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, | ||
775 | unsigned char ae, unsigned int uaddr, | ||
776 | unsigned int words_num, uint64_t *uword) | ||
777 | { | ||
778 | unsigned int i, uwrd_lo, uwrd_hi; | ||
779 | unsigned int ustore_addr, misc_control; | ||
780 | |||
781 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control); | ||
782 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, | ||
783 | misc_control & 0xfffffffb); | ||
784 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | ||
785 | uaddr |= UA_ECS; | ||
786 | for (i = 0; i < words_num; i++) { | ||
787 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
788 | uaddr++; | ||
789 | qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo); | ||
790 | qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi); | ||
791 | uword[i] = uwrd_hi; | ||
792 | uword[i] = (uword[i] << 0x20) | uwrd_lo; | ||
793 | } | ||
794 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control); | ||
795 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | ||
796 | } | ||
797 | |||
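| /* Pack each 32-bit word into the ustore microword format, adding parity | ||
|  * bits for the low and high half-words. | ||
|  */ | ||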
798 | void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, | ||
799 | unsigned char ae, unsigned int uaddr, | ||
800 | unsigned int words_num, unsigned int *data) | ||
801 | { | ||
802 | unsigned int i, ustore_addr; | ||
803 | |||
804 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | ||
805 | uaddr |= UA_ECS; | ||
806 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
807 | for (i = 0; i < words_num; i++) { | ||
808 | unsigned int uwrd_lo, uwrd_hi, tmp; | ||
809 | |||
810 | uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) | | ||
811 | ((data[i] & 0xff00) << 2) | | ||
812 | (0x3 << 8) | (data[i] & 0xff); | ||
813 | uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28); | ||
814 | uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8; | ||
815 | tmp = ((data[i] >> 0x10) & 0xffff); | ||
816 | uwrd_hi |= (hweight32(tmp) & 0x1) << 9; | ||
817 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); | ||
818 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); | ||
819 | } | ||
820 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | ||
821 | } | ||
822 | |||
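| /* Execute a caller-supplied micro-instruction sequence on one context: | ||
|  * save the context state and the overwritten ustore words, run from | ||
|  * PC 0, then restore everything once the AE is idle. | ||
|  */ | ||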
823 | #define MAX_EXEC_INST 100 | ||
824 | static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, | ||
825 | unsigned char ae, unsigned char ctx, | ||
826 | uint64_t *micro_inst, unsigned int inst_num, | ||
827 | int code_off, unsigned int max_cycle, | ||
828 | unsigned int *endpc) | ||
829 | { | ||
830 | uint64_t savuwords[MAX_EXEC_INST]; | ||
831 | unsigned int ind_lm_addr0, ind_lm_addr1; | ||
832 | unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; | ||
833 | unsigned int ind_cnt_sig; | ||
834 | unsigned int ind_sig, act_sig; | ||
835 | unsigned int csr_val = 0, newcsr_val; | ||
836 | unsigned int savctx; | ||
837 | unsigned int savcc, wakeup_events, savpc; | ||
838 | unsigned int ctxarb_ctl, ctx_enables; | ||
839 | |||
840 | if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) { | ||
841 | pr_err("QAT: invalid instruction num %d\n", inst_num); | ||
842 | return -EINVAL; | ||
843 | } | ||
844 | /* save current context */ | ||
845 | qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0); | ||
846 | qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1); | ||
847 | qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, | ||
848 | &ind_lm_addr_byte0); | ||
849 | qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, | ||
850 | &ind_lm_addr_byte1); | ||
851 | if (inst_num <= MAX_EXEC_INST) | ||
852 | qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords); | ||
853 | qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events); | ||
854 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc); | ||
855 | savpc = savpc & handle->hal_handle->upc_mask; | ||
856 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
857 | ctx_enables &= IGNORE_W1C_MASK; | ||
858 | qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc); | ||
859 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | ||
860 | qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl); | ||
861 | qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, | ||
862 | &ind_cnt_sig); | ||
863 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig); | ||
864 | qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig); | ||
865 | /* execute micro codes */ | ||
866 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
867 | qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst); | ||
868 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0); | ||
869 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO); | ||
870 | if (code_off) | ||
871 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff); | ||
872 | qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY); | ||
873 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0); | ||
874 | qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0); | ||
875 | qat_hal_enable_ctx(handle, ae, (1 << ctx)); | ||
876 | /* wait for micro codes to finish */ | ||
877 | if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0) | ||
878 | return -EFAULT; | ||
879 | if (endpc) { | ||
880 | unsigned int ctx_status; | ||
881 | |||
882 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, | ||
883 | &ctx_status); | ||
884 | *endpc = ctx_status & handle->hal_handle->upc_mask; | ||
885 | } | ||
886 | /* restore saved context */ | ||
887 | qat_hal_disable_ctx(handle, ae, (1 << ctx)); | ||
888 | if (inst_num <= MAX_EXEC_INST) | ||
889 | qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords); | ||
890 | qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events); | ||
891 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, | ||
892 | handle->hal_handle->upc_mask & savpc); | ||
893 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); | ||
894 | newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); | ||
895 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); | ||
896 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); | ||
897 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO); | ||
898 | qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl); | ||
899 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
900 | LM_ADDR_0_INDIRECT, ind_lm_addr0); | ||
901 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
902 | LM_ADDR_1_INDIRECT, ind_lm_addr1); | ||
903 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
904 | INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0); | ||
905 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
906 | INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1); | ||
907 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
908 | FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig); | ||
909 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), | ||
910 | CTX_SIG_EVENTS_INDIRECT, ind_sig); | ||
911 | qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig); | ||
912 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
913 | |||
914 | return 0; | ||
915 | } | ||
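qat_hal_exec_micro_inst() is the workhorse behind the register access helpers that follow: it snapshots the context state (LM addresses, PC, condition codes, signal state), overwrites the start of the control store with the caller's microwords, briefly enables the chosen context, then restores everything it saved. A hypothetical in-file usage sketch (the microword value is the terminator used elsewhere in this file; the driver does not spell out its mnemonic):

    /* Run a single microword on AE 0, context 0, and read back the PC. */
    static int example_exec_one(struct icp_qat_fw_loader_handle *handle)
    {
            uint64_t inst = 0x0E000010000ull;   /* terminating microword */
            unsigned int endpc;

            /* one instruction, code_off set, budget of 5 cycles */
            return qat_hal_exec_micro_inst(handle, 0, 0, &inst, 1, 1, 5,
                                           &endpc);
    }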
916 | |||
917 | static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, | ||
918 | unsigned char ae, unsigned char ctx, | ||
919 | enum icp_qat_uof_regtype reg_type, | ||
920 | unsigned short reg_num, unsigned int *data) | ||
921 | { | ||
922 | unsigned int savctx, uaddr, uwrd_lo, uwrd_hi; | ||
923 | unsigned int ctxarb_cntl, ustore_addr, ctx_enables; | ||
924 | unsigned short reg_addr; | ||
925 | int status = 0; | ||
926 | uint64_t insts, savuword; | ||
927 | |||
928 | reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); | ||
929 | if (reg_addr == BAD_REGADDR) { | ||
930 | pr_err("QAT: bad regaddr=0x%x\n", reg_addr); | ||
931 | return -EINVAL; | ||
932 | } | ||
933 | switch (reg_type) { | ||
934 | case ICP_GPA_REL: | ||
935 | insts = 0xA070000000ull | (reg_addr & 0x3ff); | ||
936 | break; | ||
937 | default: | ||
938 | insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); | ||
939 | break; | ||
940 | } | ||
941 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | ||
942 | qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl); | ||
943 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
944 | ctx_enables &= IGNORE_W1C_MASK; | ||
945 | if (ctx != (savctx & ACS_ACNO)) | ||
946 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, | ||
947 | ctx & ACS_ACNO); | ||
948 | qat_hal_get_uwords(handle, ae, 0, 1, &savuword); | ||
949 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
950 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | ||
951 | uaddr = UA_ECS; | ||
952 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
953 | insts = qat_hal_set_uword_ecc(insts); | ||
954 | uwrd_lo = (unsigned int)(insts & 0xffffffff); | ||
955 | uwrd_hi = (unsigned int)(insts >> 0x20); | ||
956 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo); | ||
957 | qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi); | ||
958 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | ||
959 | /* delay for at least 8 cycles */ | ||
960 | qat_hal_wait_cycles(handle, ae, 0x8, 0); | ||
961 | /* | ||
962 | * read ALU output | ||
963 | * the instruction should have been executed | ||
964 | * prior to clearing the ECS in putUwords | ||
965 | */ | ||
966 | qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data); | ||
967 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | ||
968 | qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); | ||
969 | if (ctx != (savctx & ACS_ACNO)) | ||
970 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, | ||
971 | savctx & ACS_ACNO); | ||
972 | qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl); | ||
973 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
974 | |||
975 | return status; | ||
976 | } | ||
977 | |||
978 | static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, | ||
979 | unsigned char ae, unsigned char ctx, | ||
980 | enum icp_qat_uof_regtype reg_type, | ||
981 | unsigned short reg_num, unsigned int data) | ||
982 | { | ||
983 | unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; | ||
984 | uint64_t insts[] = { | ||
985 | 0x0F440000000ull, | ||
986 | 0x0F040000000ull, | ||
987 | 0x0F0000C0300ull, | ||
988 | 0x0E000010000ull | ||
989 | }; | ||
990 | const int num_inst = ARRAY_SIZE(insts), code_off = 1; | ||
991 | const int imm_w1 = 0, imm_w0 = 1; | ||
992 | |||
993 | dest_addr = qat_hal_get_reg_addr(reg_type, reg_num); | ||
994 | if (dest_addr == BAD_REGADDR) { | ||
995 | pr_err("QAT: bad destAddr=0x%x\n", dest_addr); | ||
996 | return -EINVAL; | ||
997 | } | ||
998 | |||
999 | data16lo = 0xffff & data; | ||
1000 | data16hi = 0xffff & (data >> 0x10); | ||
1001 | src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short) | ||
1002 | (0xff & data16hi)); | ||
1003 | src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short) | ||
1004 | (0xff & data16lo)); | ||
1005 | switch (reg_type) { | ||
1006 | case ICP_GPA_REL: | ||
1007 | insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | | ||
1008 | ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); | ||
1009 | insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | | ||
1010 | ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff); | ||
1011 | break; | ||
1012 | default: | ||
1013 | insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) | | ||
1014 | ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); | ||
1015 | |||
1016 | insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) | | ||
1017 | ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); | ||
1018 | break; | ||
1019 | } | ||
1020 | |||
1021 | return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst, | ||
1022 | code_off, num_inst * 0x5, NULL); | ||
1023 | } | ||
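The immed pair above moves a 32-bit value in two halves; each instruction word receives the top byte of its 16-bit half in bits 20..27 plus two 10-bit register addresses. A standalone sketch of the ICP_GPA_REL encoding, using made-up register addresses (the driver derives them with qat_hal_get_reg_addr(), which is not reproduced here):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t dest_addr = 0x011;                         /* hypothetical */
            uint16_t src_hiaddr = 0x312, src_lowaddr = 0x378;   /* hypothetical */
            uint32_t data = 0x12345678;
            uint16_t data16lo = data & 0xffff;
            uint16_t data16hi = (data >> 16) & 0xffff;
            uint64_t imm_w1 = 0x0F440000000ull;
            uint64_t imm_w0 = 0x0F040000000ull;

            imm_w1 |= ((uint64_t)(data16hi >> 8) << 20) |
                      ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
            imm_w0 |= ((uint64_t)(data16lo >> 8) << 20) |
                      ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);

            printf("imm_w1=0x%011llx\nimm_w0=0x%011llx\n",
                   (unsigned long long)imm_w1, (unsigned long long)imm_w0);
            return 0;
    }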
1024 | |||
1025 | int qat_hal_get_ins_num(void) | ||
1026 | { | ||
1027 | return ARRAY_SIZE(inst_4b); | ||
1028 | } | ||
1029 | |||
1030 | static int qat_hal_concat_micro_code(uint64_t *micro_inst, | ||
1031 | unsigned int inst_num, unsigned int size, | ||
1032 | unsigned int addr, unsigned int *value) | ||
1033 | { | ||
1034 | int i, val_indx; | ||
1035 | unsigned int cur_value; | ||
1036 | const uint64_t *inst_arr; | ||
1037 | int fixup_offset; | ||
1038 | int usize = 0; | ||
1039 | int orig_num; | ||
1040 | |||
1041 | orig_num = inst_num; | ||
1042 | val_indx = 0; | ||
1043 | cur_value = value[val_indx++]; | ||
1044 | inst_arr = inst_4b; | ||
1045 | usize = ARRAY_SIZE(inst_4b); | ||
1046 | fixup_offset = inst_num; | ||
1047 | for (i = 0; i < usize; i++) | ||
1048 | micro_inst[inst_num++] = inst_arr[i]; | ||
1049 | INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr)); | ||
1050 | fixup_offset++; | ||
1051 | INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0); | ||
1052 | fixup_offset++; | ||
1053 | INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0)); | ||
1054 | fixup_offset++; | ||
1055 | INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10)); | ||
1056 | |||
1057 | return inst_num - orig_num; | ||
1058 | } | ||
1059 | |||
1060 | static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, | ||
1061 | unsigned char ae, unsigned char ctx, | ||
1062 | int *pfirst_exec, uint64_t *micro_inst, | ||
1063 | unsigned int inst_num) | ||
1064 | { | ||
1065 | int stat = 0; | ||
1066 | unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0; | ||
1067 | unsigned int gprb0 = 0, gprb1 = 0; | ||
1068 | |||
1069 | if (*pfirst_exec) { | ||
1070 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0); | ||
1071 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1); | ||
1072 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2); | ||
1073 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0); | ||
1074 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1); | ||
1075 | *pfirst_exec = 0; | ||
1076 | } | ||
1077 | stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1, | ||
1078 | inst_num * 0x5, NULL); | ||
1079 | if (stat != 0) | ||
1080 | return -EFAULT; | ||
1081 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0); | ||
1082 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1); | ||
1083 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2); | ||
1084 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0); | ||
1085 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1); | ||
1086 | |||
1087 | return 0; | ||
1088 | } | ||
1089 | |||
1090 | int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, | ||
1091 | unsigned char ae, | ||
1092 | struct icp_qat_uof_batch_init *lm_init_header) | ||
1093 | { | ||
1094 | struct icp_qat_uof_batch_init *plm_init; | ||
1095 | uint64_t *micro_inst_array; | ||
1096 | int micro_inst_num; | ||
1097 | int alloc_inst_size; | ||
1098 | int first_exec = 1; | ||
1099 | int stat = 0; | ||
1100 | |||
1101 | plm_init = lm_init_header->next; | ||
1102 | alloc_inst_size = lm_init_header->size; | ||
1103 | if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) | ||
1104 | alloc_inst_size = handle->hal_handle->max_ustore; | ||
1105 | micro_inst_array = kmalloc_array(alloc_inst_size, sizeof(uint64_t), | ||
1106 | GFP_KERNEL); | ||
1107 | if (!micro_inst_array) | ||
1108 | return -ENOMEM; | ||
1109 | micro_inst_num = 0; | ||
1110 | while (plm_init) { | ||
1111 | unsigned int addr, *value, size; | ||
1112 | |||
1113 | ae = plm_init->ae; | ||
1114 | addr = plm_init->addr; | ||
1115 | value = plm_init->value; | ||
1116 | size = plm_init->size; | ||
1117 | micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry, | ||
1118 | micro_inst_num, | ||
1119 | size, addr, value); | ||
1120 | plm_init = plm_init->next; | ||
1121 | } | ||
1122 | /* exec micro codes */ | ||
1123 | if (micro_inst_array && (micro_inst_num > 0)) { | ||
1124 | micro_inst_array[micro_inst_num++] = 0x0E000010000ull; | ||
1125 | stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec, | ||
1126 | micro_inst_array, | ||
1127 | micro_inst_num); | ||
1128 | } | ||
1129 | kfree(micro_inst_array); | ||
1130 | return stat; | ||
1131 | } | ||
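qat_hal_batch_wr_lm() consumes a singly linked list whose head node only carries the total instruction budget (->size); the payload nodes hang off ->next. A minimal userspace sketch of building such a list, with field names taken from the usage above (struct layout assumed for illustration, not copied from the driver headers):

    #include <stdio.h>

    struct batch_init {                 /* stand-in for icp_qat_uof_batch_init */
            unsigned int ae;
            unsigned int addr;
            unsigned int *value;
            unsigned int size;
            struct batch_init *next;
    };

    int main(void)
    {
            static unsigned int val = 0xcafe;
            struct batch_init head = { .size = 1, .next = NULL };
            struct batch_init node = {
                    .ae = 0, .addr = 0x40, .value = &val, .size = 4,
            };

            head.next = &node;          /* header itself carries no payload */
            for (struct batch_init *p = head.next; p; p = p->next)
                    printf("ae %u: write %u bytes at 0x%x\n",
                           p->ae, p->size, p->addr);
            return 0;
    }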
1132 | |||
1133 | static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, | ||
1134 | unsigned char ae, unsigned char ctx, | ||
1135 | enum icp_qat_uof_regtype reg_type, | ||
1136 | unsigned short reg_num, unsigned int val) | ||
1137 | { | ||
1138 | int status = 0; | ||
1139 | unsigned int reg_addr; | ||
1140 | unsigned int ctx_enables; | ||
1141 | unsigned short mask; | ||
1142 | unsigned short dr_offset = 0x10; | ||
1143 | |||
1144 | status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
1145 | if (CE_INUSE_CONTEXTS & ctx_enables) { | ||
1146 | if (ctx & 0x1) { | ||
1147 | pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); | ||
1148 | return -EINVAL; | ||
1149 | } | ||
1150 | mask = 0x1f; | ||
1151 | dr_offset = 0x20; | ||
1152 | } else { | ||
1153 | mask = 0x0f; | ||
1154 | } | ||
1155 | if (reg_num & ~mask) | ||
1156 | return -EINVAL; | ||
1157 | reg_addr = reg_num + (ctx << 0x5); | ||
1158 | switch (reg_type) { | ||
1159 | case ICP_SR_RD_REL: | ||
1160 | case ICP_SR_REL: | ||
1161 | SET_AE_XFER(handle, ae, reg_addr, val); | ||
1162 | break; | ||
1163 | case ICP_DR_RD_REL: | ||
1164 | case ICP_DR_REL: | ||
1165 | SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val); | ||
1166 | break; | ||
1167 | default: | ||
1168 | status = -EINVAL; | ||
1169 | break; | ||
1170 | } | ||
1171 | return status; | ||
1172 | } | ||
1173 | |||
1174 | static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, | ||
1175 | unsigned char ae, unsigned char ctx, | ||
1176 | enum icp_qat_uof_regtype reg_type, | ||
1177 | unsigned short reg_num, unsigned int data) | ||
1178 | { | ||
1179 | unsigned int gprval, ctx_enables; | ||
1180 | unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi, | ||
1181 | data16low; | ||
1182 | unsigned short reg_mask; | ||
1183 | int status = 0; | ||
1184 | uint64_t micro_inst[] = { | ||
1185 | 0x0F440000000ull, | ||
1186 | 0x0F040000000ull, | ||
1187 | 0x0A000000000ull, | ||
1188 | 0x0F0000C0300ull, | ||
1189 | 0x0E000010000ull | ||
1190 | }; | ||
1191 | const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; | ||
1192 | const unsigned short gprnum = 0, dly = num_inst * 0x5; | ||
1193 | |||
1194 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
1195 | if (CE_INUSE_CONTEXTS & ctx_enables) { | ||
1196 | if (ctx & 0x1) { | ||
1197 | pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); | ||
1198 | return -EINVAL; | ||
1199 | } | ||
1200 | reg_mask = (unsigned short)~0x1f; | ||
1201 | } else { | ||
1202 | reg_mask = (unsigned short)~0xf; | ||
1203 | } | ||
1204 | if (reg_num & reg_mask) | ||
1205 | return -EINVAL; | ||
1206 | xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num); | ||
1207 | if (xfr_addr == BAD_REGADDR) { | ||
1208 | pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr); | ||
1209 | return -EINVAL; | ||
1210 | } | ||
1211 | qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); | ||
1212 | gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum); | ||
1213 | data16low = 0xffff & data; | ||
1214 | data16hi = 0xffff & (data >> 0x10); | ||
1215 | src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, | ||
1216 | (unsigned short)(0xff & data16hi)); | ||
1217 | src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, | ||
1218 | (unsigned short)(0xff & data16low)); | ||
1219 | micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) | | ||
1220 | ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff); | ||
1221 | micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) | | ||
1222 | ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff); | ||
1223 | micro_inst[0x2] = micro_inst[0x2] | | ||
1224 | ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10); | ||
1225 | status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst, | ||
1226 | code_off, dly, NULL); | ||
1227 | qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval); | ||
1228 | return status; | ||
1229 | } | ||
1230 | |||
1231 | static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle, | ||
1232 | unsigned char ae, unsigned char ctx, | ||
1233 | unsigned short nn, unsigned int val) | ||
1234 | { | ||
1235 | unsigned int ctx_enables; | ||
1236 | int stat = 0; | ||
1237 | |||
1238 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
1239 | ctx_enables &= IGNORE_W1C_MASK; | ||
1240 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); | ||
1241 | |||
1242 | stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val); | ||
1243 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | ||
1244 | return stat; | ||
1245 | } | ||
1246 | |||
1247 | static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle | ||
1248 | *handle, unsigned char ae, | ||
1249 | unsigned short absreg_num, | ||
1250 | unsigned short *relreg, | ||
1251 | unsigned char *ctx) | ||
1252 | { | ||
1253 | unsigned int ctx_enables; | ||
1254 | |||
1255 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | ||
1256 | if (ctx_enables & CE_INUSE_CONTEXTS) { | ||
1257 | /* 4-ctx mode */ | ||
1258 | *relreg = absreg_num & 0x1F; | ||
1259 | *ctx = (absreg_num >> 0x4) & 0x6; | ||
1260 | } else { | ||
1261 | /* 8-ctx mode */ | ||
1262 | *relreg = absreg_num & 0x0F; | ||
1263 | *ctx = (absreg_num >> 0x4) & 0x7; | ||
1264 | } | ||
1265 | return 0; | ||
1266 | } | ||
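A worked example of the mapping above: in 8-context mode an absolute register number packs the context in bits 4..6 and the relative register in bits 0..3, so 0x2b maps to context 2, relative register 0xb; in 4-context mode only the even contexts exist, hence the & 0x6. A userspace sketch (the CTX_ENABLES read is replaced by a hardcoded mode flag):

    #include <stdio.h>

    int main(void)
    {
            unsigned short absreg_num = 0x2b;
            int four_ctx_mode = 0;              /* assume 8-ctx mode */
            unsigned short relreg;
            unsigned char ctx;

            if (four_ctx_mode) {
                    relreg = absreg_num & 0x1f;
                    ctx = (absreg_num >> 4) & 0x6;  /* even contexts only */
            } else {
                    relreg = absreg_num & 0x0f;
                    ctx = (absreg_num >> 4) & 0x7;
            }
            printf("abs 0x%x -> ctx %u, rel reg 0x%x\n",
                   absreg_num, (unsigned)ctx, relreg);
            return 0;
    }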
1267 | |||
1268 | int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle, | ||
1269 | unsigned char ae, unsigned char ctx_mask, | ||
1270 | enum icp_qat_uof_regtype reg_type, | ||
1271 | unsigned short reg_num, unsigned int regdata) | ||
1272 | { | ||
1273 | int stat = 0; | ||
1274 | unsigned short reg; | ||
1275 | unsigned char ctx = 0; | ||
1276 | enum icp_qat_uof_regtype type; | ||
1277 | |||
1278 | if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG) | ||
1279 | return -EINVAL; | ||
1280 | |||
1281 | do { | ||
1282 | if (ctx_mask == 0) { | ||
1283 | qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, | ||
1284 | &ctx); | ||
1285 | type = reg_type - 1; | ||
1286 | } else { | ||
1287 | reg = reg_num; | ||
1288 | type = reg_type; | ||
1289 | if (!test_bit(ctx, (unsigned long *)&ctx_mask)) | ||
1290 | continue; | ||
1291 | } | ||
1292 | stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata); | ||
1293 | if (stat) { | ||
1294 | pr_err("QAT: write gpr fail\n"); | ||
1295 | return -EINVAL; | ||
1296 | } | ||
1297 | } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); | ||
1298 | |||
1299 | return 0; | ||
1300 | } | ||
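qat_hal_init_gpr(), like qat_hal_init_wr_xfer() and qat_hal_init_rd_xfer() below, treats ctx_mask == 0 as absolute addressing: reg_num then encodes both the context and the relative register (resolved by qat_hal_convert_abs_to_rel() above), and reg_type is shifted to its *_REL counterpart (the reg_type - 1 and reg_type - 3 arithmetic assumes a fixed enum layout). A nonzero ctx_mask is a per-context bitmask. A userspace sketch of the mask walk (the driver uses test_bit() instead of the explicit shift):

    #include <stdio.h>

    int main(void)
    {
            unsigned char ctx_mask = 0x55;      /* contexts 0, 2, 4, 6 */
            unsigned char ctx = 0;

            do {
                    if (ctx_mask && !(ctx_mask & (1u << ctx)))
                            continue;           /* jumps to the while test */
                    printf("write ctx %u\n", (unsigned)ctx);
            } while (ctx_mask && (ctx++ < 8));
            return 0;
    }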
1301 | |||
1302 | int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle, | ||
1303 | unsigned char ae, unsigned char ctx_mask, | ||
1304 | enum icp_qat_uof_regtype reg_type, | ||
1305 | unsigned short reg_num, unsigned int regdata) | ||
1306 | { | ||
1307 | int stat = 0; | ||
1308 | unsigned short reg; | ||
1309 | unsigned char ctx = 0; | ||
1310 | enum icp_qat_uof_regtype type; | ||
1311 | |||
1312 | if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) | ||
1313 | return -EINVAL; | ||
1314 | |||
1315 | do { | ||
1316 | if (ctx_mask == 0) { | ||
1317 | qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, | ||
1318 | &ctx); | ||
1319 | type = reg_type - 3; | ||
1320 | } else { | ||
1321 | reg = reg_num; | ||
1322 | type = reg_type; | ||
1323 | if (!test_bit(ctx, (unsigned long *)&ctx_mask)) | ||
1324 | continue; | ||
1325 | } | ||
1326 | stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg, | ||
1327 | regdata); | ||
1328 | if (stat) { | ||
1329 | pr_err("QAT: write wr xfer fail\n"); | ||
1330 | return -EINVAL; | ||
1331 | } | ||
1332 | } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); | ||
1333 | |||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
1337 | int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, | ||
1338 | unsigned char ae, unsigned char ctx_mask, | ||
1339 | enum icp_qat_uof_regtype reg_type, | ||
1340 | unsigned short reg_num, unsigned int regdata) | ||
1341 | { | ||
1342 | int stat = 0; | ||
1343 | unsigned short reg; | ||
1344 | unsigned char ctx = 0; | ||
1345 | enum icp_qat_uof_regtype type; | ||
1346 | |||
1347 | if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG) | ||
1348 | return -EINVAL; | ||
1349 | |||
1350 | do { | ||
1351 | if (ctx_mask == 0) { | ||
1352 | qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®, | ||
1353 | &ctx); | ||
1354 | type = reg_type - 3; | ||
1355 | } else { | ||
1356 | reg = reg_num; | ||
1357 | type = reg_type; | ||
1358 | if (!test_bit(ctx, (unsigned long *)&ctx_mask)) | ||
1359 | continue; | ||
1360 | } | ||
1361 | stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg, | ||
1362 | regdata); | ||
1363 | if (stat) { | ||
1364 | pr_err("QAT: write rd xfer fail\n"); | ||
1365 | return -EINVAL; | ||
1366 | } | ||
1367 | } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX)); | ||
1368 | |||
1369 | return 0; | ||
1370 | } | ||
1371 | |||
1372 | int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, | ||
1373 | unsigned char ae, unsigned char ctx_mask, | ||
1374 | unsigned short reg_num, unsigned int regdata) | ||
1375 | { | ||
1376 | int stat = 0; | ||
1377 | unsigned char ctx; | ||
1378 | |||
1379 | if (ctx_mask == 0) | ||
1380 | return -EINVAL; | ||
1381 | |||
1382 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | ||
1383 | if (!test_bit(ctx, (unsigned long *)&ctx_mask)) | ||
1384 | continue; | ||
1385 | stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata); | ||
1386 | if (stat) { | ||
1387 | pr_err("QAT: write neigh error\n"); | ||
1388 | return -EINVAL; | ||
1389 | } | ||
1390 | } | ||
1391 | |||
1392 | return 0; | ||
1393 | } | ||
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c new file mode 100644 index 000000000000..1e27f9f7fddf --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_uclo.c | |||
@@ -0,0 +1,1181 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/slab.h> | ||
48 | #include <linux/ctype.h> | ||
49 | #include <linux/kernel.h> | ||
50 | |||
51 | #include "adf_accel_devices.h" | ||
52 | #include "adf_common_drv.h" | ||
53 | #include "icp_qat_uclo.h" | ||
54 | #include "icp_qat_hal.h" | ||
55 | #include "icp_qat_fw_loader_handle.h" | ||
56 | |||
57 | #define UWORD_CPYBUF_SIZE 1024 | ||
58 | #define INVLD_UWORD 0xffffffffffull | ||
59 | #define PID_MINOR_REV 0xf | ||
60 | #define PID_MAJOR_REV (0xf << 4) | ||
61 | |||
62 | static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, | ||
63 | unsigned int ae, unsigned int image_num) | ||
64 | { | ||
65 | struct icp_qat_uclo_aedata *ae_data; | ||
66 | struct icp_qat_uclo_encapme *encap_image; | ||
67 | struct icp_qat_uclo_page *page = NULL; | ||
68 | struct icp_qat_uclo_aeslice *ae_slice = NULL; | ||
69 | |||
70 | ae_data = &obj_handle->ae_data[ae]; | ||
71 | encap_image = &obj_handle->ae_uimage[image_num]; | ||
72 | ae_slice = &ae_data->ae_slices[ae_data->slice_num]; | ||
73 | ae_slice->encap_image = encap_image; | ||
74 | |||
75 | if (encap_image->img_ptr) { | ||
76 | ae_slice->ctx_mask_assigned = | ||
77 | encap_image->img_ptr->ctx_assigned; | ||
78 | ae_data->eff_ustore_size = obj_handle->ustore_phy_size; | ||
79 | } else { | ||
80 | ae_slice->ctx_mask_assigned = 0; | ||
81 | } | ||
82 | ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL); | ||
83 | if (!ae_slice->region) | ||
84 | return -ENOMEM; | ||
85 | ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL); | ||
86 | if (!ae_slice->page) | ||
87 | goto out_err; | ||
88 | page = ae_slice->page; | ||
89 | page->encap_page = encap_image->page; | ||
90 | ae_slice->page->region = ae_slice->region; | ||
91 | ae_data->slice_num++; | ||
92 | return 0; | ||
93 | out_err: | ||
94 | kfree(ae_slice->region); | ||
95 | ae_slice->region = NULL; | ||
96 | return -ENOMEM; | ||
97 | } | ||
98 | |||
99 | static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) | ||
100 | { | ||
101 | unsigned int i; | ||
102 | |||
103 | if (!ae_data) { | ||
104 | pr_err("QAT: bad argument, ae_data is NULL\n "); | ||
105 | return -EINVAL; | ||
106 | } | ||
107 | |||
108 | for (i = 0; i < ae_data->slice_num; i++) { | ||
109 | kfree(ae_data->ae_slices[i].region); | ||
110 | ae_data->ae_slices[i].region = NULL; | ||
111 | kfree(ae_data->ae_slices[i].page); | ||
112 | ae_data->ae_slices[i].page = NULL; | ||
113 | } | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, | ||
118 | unsigned int str_offset) | ||
119 | { | ||
120 | if ((!str_table->table_len) || (str_offset > str_table->table_len)) | ||
121 | return NULL; | ||
122 | return (char *)(((unsigned long)(str_table->strings)) + str_offset); | ||
123 | } | ||
124 | |||
125 | static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) | ||
126 | { | ||
127 | int maj = hdr->maj_ver & 0xff; | ||
128 | int min = hdr->min_ver & 0xff; | ||
129 | |||
130 | if (hdr->file_id != ICP_QAT_UOF_FID) { | ||
131 | pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); | ||
132 | return -EINVAL; | ||
133 | } | ||
134 | if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { | ||
135 | pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", | ||
136 | maj, min); | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, | ||
143 | unsigned int addr, unsigned int *val, | ||
144 | unsigned int num_in_bytes) | ||
145 | { | ||
146 | unsigned int outval; | ||
147 | unsigned char *ptr = (unsigned char *)val; | ||
148 | |||
149 | while (num_in_bytes) { /* assumes a multiple of 4 bytes */ | ||
150 | memcpy(&outval, ptr, 4); | ||
151 | SRAM_WRITE(handle, addr, outval); | ||
152 | num_in_bytes -= 4; | ||
153 | ptr += 4; | ||
154 | addr += 4; | ||
155 | } | ||
156 | } | ||
157 | |||
158 | static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, | ||
159 | unsigned char ae, unsigned int addr, | ||
160 | unsigned int *val, | ||
161 | unsigned int num_in_bytes) | ||
162 | { | ||
163 | unsigned int outval; | ||
164 | unsigned char *ptr = (unsigned char *)val; | ||
165 | |||
166 | addr >>= 0x2; /* convert to uword address */ | ||
167 | |||
168 | while (num_in_bytes) { | ||
169 | memcpy(&outval, ptr, 4); | ||
170 | qat_hal_wr_umem(handle, ae, addr++, 1, &outval); | ||
171 | num_in_bytes -= 4; | ||
172 | ptr += 4; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, | ||
177 | unsigned char ae, | ||
178 | struct icp_qat_uof_batch_init | ||
179 | *umem_init_header) | ||
180 | { | ||
181 | struct icp_qat_uof_batch_init *umem_init; | ||
182 | |||
183 | if (!umem_init_header) | ||
184 | return; | ||
185 | umem_init = umem_init_header->next; | ||
186 | while (umem_init) { | ||
187 | unsigned int addr, *value, size; | ||
188 | |||
189 | ae = umem_init->ae; | ||
190 | addr = umem_init->addr; | ||
191 | value = umem_init->value; | ||
192 | size = umem_init->size; | ||
193 | qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); | ||
194 | umem_init = umem_init->next; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | static void | ||
199 | qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, | ||
200 | struct icp_qat_uof_batch_init **base) | ||
201 | { | ||
202 | struct icp_qat_uof_batch_init *umem_init; | ||
203 | |||
204 | umem_init = *base; | ||
205 | while (umem_init) { | ||
206 | struct icp_qat_uof_batch_init *pre; | ||
207 | |||
208 | pre = umem_init; | ||
209 | umem_init = umem_init->next; | ||
210 | kfree(pre); | ||
211 | } | ||
212 | *base = NULL; | ||
213 | } | ||
214 | |||
215 | static int qat_uclo_parse_num(char *str, unsigned int *num) | ||
216 | { | ||
217 | char buf[16] = {0}; | ||
218 | unsigned long ae = 0; | ||
219 | int i; | ||
220 | |||
221 | strncpy(buf, str, 15); | ||
222 | for (i = 0; i < 16; i++) { | ||
223 | if (!isdigit(buf[i])) { | ||
224 | buf[i] = '\0'; | ||
225 | break; | ||
226 | } | ||
227 | } | ||
228 | if ((kstrtoul(buf, 10, &ae))) | ||
229 | return -EFAULT; | ||
230 | |||
231 | *num = (unsigned int)ae; | ||
232 | return 0; | ||
233 | } | ||
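qat_uclo_parse_num() extracts the leading decimal digits of a string, e.g. an AE number embedded at the start of a UOF init-table symbol name. A userspace sketch with strtoul standing in for kstrtoul (the inputs are hypothetical; the actual symbol format comes from the UOF file):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <ctype.h>

    static int parse_num(const char *str, unsigned int *num)
    {
            char buf[16] = {0};
            int i;

            strncpy(buf, str, 15);
            for (i = 0; i < 16; i++) {
                    if (!isdigit((unsigned char)buf[i])) {
                            buf[i] = '\0';      /* cut at first non-digit */
                            break;
                    }
            }
            if (!buf[0])
                    return -1;  /* kstrtoul rejects an empty string */
            *num = (unsigned int)strtoul(buf, NULL, 10);
            return 0;
    }

    int main(void)
    {
            unsigned int ae;

            if (!parse_num("12foo", &ae))
                    printf("ae=%u\n", ae);      /* prints ae=12 */
            return 0;
    }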
234 | |||
235 | static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, | ||
236 | struct icp_qat_uof_initmem *init_mem, | ||
237 | unsigned int size_range, unsigned int *ae) | ||
238 | { | ||
239 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
240 | char *str; | ||
241 | |||
242 | if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { | ||
243 | pr_err("QAT: initmem is out of range"); | ||
244 | return -EINVAL; | ||
245 | } | ||
246 | if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { | ||
247 | pr_err("QAT: Memory scope for init_mem error\n"); | ||
248 | return -EINVAL; | ||
249 | } | ||
250 | str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); | ||
251 | if (!str) { | ||
252 | pr_err("QAT: AE name assigned in UOF init table is NULL\n"); | ||
253 | return -EINVAL; | ||
254 | } | ||
255 | if (qat_uclo_parse_num(str, ae)) { | ||
256 | pr_err("QAT: Parse num for AE number failed\n"); | ||
257 | return -EINVAL; | ||
258 | } | ||
259 | if (*ae >= ICP_QAT_UCLO_MAX_AE) { | ||
260 | pr_err("QAT: ae %d out of range\n", *ae); | ||
261 | return -EINVAL; | ||
262 | } | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle | ||
267 | *handle, struct icp_qat_uof_initmem | ||
268 | *init_mem, unsigned int ae, | ||
269 | struct icp_qat_uof_batch_init | ||
270 | **init_tab_base) | ||
271 | { | ||
272 | struct icp_qat_uof_batch_init *init_header, *tail; | ||
273 | struct icp_qat_uof_batch_init *mem_init, *tail_old; | ||
274 | struct icp_qat_uof_memvar_attr *mem_val_attr; | ||
275 | unsigned int i, flag = 0; | ||
276 | |||
277 | mem_val_attr = | ||
278 | (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + | ||
279 | sizeof(struct icp_qat_uof_initmem)); | ||
280 | |||
281 | init_header = *init_tab_base; | ||
282 | if (!init_header) { | ||
283 | init_header = kzalloc(sizeof(*init_header), GFP_KERNEL); | ||
284 | if (!init_header) | ||
285 | return -ENOMEM; | ||
286 | init_header->size = 1; | ||
287 | *init_tab_base = init_header; | ||
288 | flag = 1; | ||
289 | } | ||
290 | tail_old = init_header; | ||
291 | while (tail_old->next) | ||
292 | tail_old = tail_old->next; | ||
293 | tail = tail_old; | ||
294 | for (i = 0; i < init_mem->val_attr_num; i++) { | ||
295 | mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL); | ||
296 | if (!mem_init) | ||
297 | goto out_err; | ||
298 | mem_init->ae = ae; | ||
299 | mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; | ||
300 | mem_init->value = &mem_val_attr->value; | ||
301 | mem_init->size = 4; | ||
302 | mem_init->next = NULL; | ||
303 | tail->next = mem_init; | ||
304 | tail = mem_init; | ||
305 | init_header->size += qat_hal_get_ins_num(); | ||
306 | mem_val_attr++; | ||
307 | } | ||
308 | return 0; | ||
309 | out_err: | ||
310 | while (tail_old) { | ||
311 | mem_init = tail_old->next; | ||
312 | kfree(tail_old); | ||
313 | tail_old = mem_init; | ||
314 | } | ||
315 | if (flag) | ||
316 | kfree(*init_tab_base); | ||
317 | return -ENOMEM; | ||
318 | } | ||
319 | |||
320 | static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, | ||
321 | struct icp_qat_uof_initmem *init_mem) | ||
322 | { | ||
323 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
324 | unsigned int ae; | ||
325 | |||
326 | if (qat_uclo_fetch_initmem_ae(handle, init_mem, | ||
327 | ICP_QAT_UCLO_MAX_LMEM_REG, &ae)) | ||
328 | return -EINVAL; | ||
329 | if (qat_uclo_create_batch_init_list(handle, init_mem, ae, | ||
330 | &obj_handle->lm_init_tab[ae])) | ||
331 | return -EINVAL; | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, | ||
336 | struct icp_qat_uof_initmem *init_mem) | ||
337 | { | ||
338 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
339 | unsigned int ae, ustore_size, uaddr, i; | ||
340 | |||
341 | ustore_size = obj_handle->ustore_phy_size; | ||
342 | if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) | ||
343 | return -EINVAL; | ||
344 | if (qat_uclo_create_batch_init_list(handle, init_mem, ae, | ||
345 | &obj_handle->umem_init_tab[ae])) | ||
346 | return -EINVAL; | ||
347 | /* set the highest ustore address referenced */ | ||
348 | uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; | ||
349 | for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) { | ||
350 | if (obj_handle->ae_data[ae].ae_slices[i]. | ||
351 | encap_image->uwords_num < uaddr) | ||
352 | obj_handle->ae_data[ae].ae_slices[i]. | ||
353 | encap_image->uwords_num = uaddr; | ||
354 | } | ||
355 | return 0; | ||
356 | } | ||
357 | |||
358 | #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 | ||
359 | static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, | ||
360 | struct icp_qat_uof_initmem *init_mem) | ||
361 | { | ||
362 | unsigned int i; | ||
363 | struct icp_qat_uof_memvar_attr *mem_val_attr; | ||
364 | |||
365 | mem_val_attr = | ||
366 | (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + | ||
367 | sizeof(struct icp_qat_uof_initmem)); | ||
368 | |||
369 | switch (init_mem->region) { | ||
370 | case ICP_QAT_UOF_SRAM_REGION: | ||
371 | if ((init_mem->addr + init_mem->num_in_bytes) > | ||
372 | ICP_DH895XCC_PESRAM_BAR_SIZE) { | ||
373 | pr_err("QAT: initmem on SRAM is out of range"); | ||
374 | return -EINVAL; | ||
375 | } | ||
376 | for (i = 0; i < init_mem->val_attr_num; i++) { | ||
377 | qat_uclo_wr_sram_by_words(handle, | ||
378 | init_mem->addr + | ||
379 | mem_val_attr->offset_in_byte, | ||
380 | &mem_val_attr->value, 4); | ||
381 | mem_val_attr++; | ||
382 | } | ||
383 | break; | ||
384 | case ICP_QAT_UOF_LMEM_REGION: | ||
385 | if (qat_uclo_init_lmem_seg(handle, init_mem)) | ||
386 | return -EINVAL; | ||
387 | break; | ||
388 | case ICP_QAT_UOF_UMEM_REGION: | ||
389 | if (qat_uclo_init_umem_seg(handle, init_mem)) | ||
390 | return -EINVAL; | ||
391 | break; | ||
392 | default: | ||
393 | pr_err("QAT: initmem region error. region type=0x%x\n", | ||
394 | init_mem->region); | ||
395 | return -EINVAL; | ||
396 | } | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, | ||
401 | struct icp_qat_uclo_encapme *image) | ||
402 | { | ||
403 | unsigned int i; | ||
404 | struct icp_qat_uclo_encap_page *page; | ||
405 | struct icp_qat_uof_image *uof_image; | ||
406 | unsigned char ae; | ||
407 | unsigned int ustore_size; | ||
408 | unsigned int patt_pos; | ||
409 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
410 | uint64_t *fill_data; | ||
411 | |||
412 | uof_image = image->img_ptr; | ||
413 | fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t), | ||
414 | GFP_KERNEL); | ||
415 | if (!fill_data) | ||
416 | return -ENOMEM; | ||
417 | for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) | ||
418 | memcpy(&fill_data[i], &uof_image->fill_pattern, | ||
419 | sizeof(uint64_t)); | ||
420 | page = image->page; | ||
421 | |||
422 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
423 | if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned)) | ||
424 | continue; | ||
425 | ustore_size = obj_handle->ae_data[ae].eff_ustore_size; | ||
426 | patt_pos = page->beg_addr_p + page->micro_words_num; | ||
427 | |||
428 | qat_hal_wr_uwords(handle, (unsigned char)ae, 0, | ||
429 | page->beg_addr_p, &fill_data[0]); | ||
430 | qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos, | ||
431 | ustore_size - patt_pos + 1, | ||
432 | &fill_data[page->beg_addr_p]); | ||
433 | } | ||
434 | kfree(fill_data); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) | ||
439 | { | ||
440 | int i, ae; | ||
441 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
442 | struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; | ||
443 | |||
444 | for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { | ||
445 | if (initmem->num_in_bytes) { | ||
446 | if (qat_uclo_init_ae_memory(handle, initmem)) | ||
447 | return -EINVAL; | ||
448 | } | ||
449 | initmem = (struct icp_qat_uof_initmem *)((unsigned long)( | ||
450 | (unsigned long)initmem + | ||
451 | sizeof(struct icp_qat_uof_initmem)) + | ||
452 | (sizeof(struct icp_qat_uof_memvar_attr) * | ||
453 | initmem->val_attr_num)); | ||
454 | } | ||
455 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
456 | if (qat_hal_batch_wr_lm(handle, ae, | ||
457 | obj_handle->lm_init_tab[ae])) { | ||
458 | pr_err("QAT: fail to batch init lmem for AE %d\n", ae); | ||
459 | return -EINVAL; | ||
460 | } | ||
461 | qat_uclo_cleanup_batch_init_list(handle, | ||
462 | &obj_handle->lm_init_tab[ae]); | ||
463 | qat_uclo_batch_wr_umem(handle, ae, | ||
464 | obj_handle->umem_init_tab[ae]); | ||
465 | qat_uclo_cleanup_batch_init_list(handle, | ||
466 | &obj_handle-> | ||
467 | umem_init_tab[ae]); | ||
468 | } | ||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, | ||
473 | char *chunk_id, void *cur) | ||
474 | { | ||
475 | int i; | ||
476 | struct icp_qat_uof_chunkhdr *chunk_hdr = | ||
477 | (struct icp_qat_uof_chunkhdr *) | ||
478 | ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); | ||
479 | |||
480 | for (i = 0; i < obj_hdr->num_chunks; i++) { | ||
481 | if ((cur < (void *)&chunk_hdr[i]) && | ||
482 | !strncmp(chunk_hdr[i].chunk_id, chunk_id, | ||
483 | ICP_QAT_UOF_OBJID_LEN)) { | ||
484 | return &chunk_hdr[i]; | ||
485 | } | ||
486 | } | ||
487 | return NULL; | ||
488 | } | ||
489 | |||
490 | static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch) | ||
491 | { | ||
492 | int i; | ||
493 | unsigned int topbit = 1 << 0xF; | ||
494 | unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); | ||
495 | |||
496 | reg ^= inbyte << 0x8; | ||
497 | for (i = 0; i < 0x8; i++) { | ||
498 | if (reg & topbit) | ||
499 | reg = (reg << 1) ^ 0x1021; | ||
500 | else | ||
501 | reg <<= 1; | ||
502 | } | ||
503 | return reg & 0xFFFF; | ||
504 | } | ||
505 | |||
506 | static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num) | ||
507 | { | ||
508 | unsigned int chksum = 0; | ||
509 | |||
510 | if (ptr) | ||
511 | while (num--) | ||
512 | chksum = qat_uclo_calc_checksum(chksum, *ptr++); | ||
513 | return chksum; | ||
514 | } | ||
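qat_uclo_calc_checksum() is a bytewise CRC-16 with polynomial 0x1021 (the CCITT polynomial), seeded with 0 by qat_uclo_calc_str_checksum(); given the zero seed this matches the common XMODEM variant. Note that reg >> 0x18 is effectively a no-op once reg has been masked back to 16 bits, so each step simply XORs the incoming byte into bits 8..15. A standalone sketch:

    #include <stdio.h>

    static unsigned int crc16_step(unsigned int reg, int ch)
    {
            unsigned int topbit = 1 << 15;
            unsigned int inbyte = (reg >> 24) ^ ch; /* reg >> 24 is 0 here */
            int i;

            reg ^= inbyte << 8;
            for (i = 0; i < 8; i++)
                    reg = (reg & topbit) ? (reg << 1) ^ 0x1021 : reg << 1;
            return reg & 0xffff;
    }

    int main(void)
    {
            const char *s = "UOF_OBJS";     /* hypothetical chunk payload */
            unsigned int chksum = 0;

            while (*s)
                    chksum = crc16_step(chksum, *s++);
            printf("checksum=0x%04x\n", chksum);
            return 0;
    }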
515 | |||
516 | static struct icp_qat_uclo_objhdr * | ||
517 | qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, | ||
518 | char *chunk_id) | ||
519 | { | ||
520 | struct icp_qat_uof_filechunkhdr *file_chunk; | ||
521 | struct icp_qat_uclo_objhdr *obj_hdr; | ||
522 | char *chunk; | ||
523 | int i; | ||
524 | |||
525 | file_chunk = (struct icp_qat_uof_filechunkhdr *) | ||
526 | (buf + sizeof(struct icp_qat_uof_filehdr)); | ||
527 | for (i = 0; i < file_hdr->num_chunks; i++) { | ||
528 | if (!strncmp(file_chunk->chunk_id, chunk_id, | ||
529 | ICP_QAT_UOF_OBJID_LEN)) { | ||
530 | chunk = buf + file_chunk->offset; | ||
531 | if (file_chunk->checksum != qat_uclo_calc_str_checksum( | ||
532 | chunk, file_chunk->size)) | ||
533 | break; | ||
534 | obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL); | ||
535 | if (!obj_hdr) | ||
536 | break; | ||
537 | obj_hdr->file_buff = chunk; | ||
538 | obj_hdr->checksum = file_chunk->checksum; | ||
539 | obj_hdr->size = file_chunk->size; | ||
540 | return obj_hdr; | ||
541 | } | ||
542 | file_chunk++; | ||
543 | } | ||
544 | return NULL; | ||
545 | } | ||
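qat_uclo_map_chunk() walks the file-chunk directory that sits immediately after the file header and accepts a chunk only when the CRC-16 above matches its directory entry. The container layout implied by the parsing (offsets per the code, names per the structs):

    /*
     * buf
     *  +-- struct icp_qat_uof_filehdr
     *  +-- struct icp_qat_uof_filechunkhdr[file_hdr->num_chunks]
     *  |       .chunk_id, .checksum, .offset, .size
     *  +-- chunk payloads, each at buf + file_chunk->offset
     */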
546 | |||
547 | static unsigned int | ||
548 | qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, | ||
549 | struct icp_qat_uof_image *image) | ||
550 | { | ||
551 | struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; | ||
552 | struct icp_qat_uof_objtable *neigh_reg_tab; | ||
553 | struct icp_qat_uof_code_page *code_page; | ||
554 | |||
555 | code_page = (struct icp_qat_uof_code_page *) | ||
556 | ((char *)image + sizeof(struct icp_qat_uof_image)); | ||
557 | uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + | ||
558 | code_page->uc_var_tab_offset); | ||
559 | imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + | ||
560 | code_page->imp_var_tab_offset); | ||
561 | imp_expr_tab = (struct icp_qat_uof_objtable *) | ||
562 | (encap_uof_obj->beg_uof + | ||
563 | code_page->imp_expr_tab_offset); | ||
564 | if (uc_var_tab->entry_num || imp_var_tab->entry_num || | ||
565 | imp_expr_tab->entry_num) { | ||
566 | pr_err("QAT: UOF can't contain imported variable to be parsed"); | ||
567 | return -EINVAL; | ||
568 | } | ||
569 | neigh_reg_tab = (struct icp_qat_uof_objtable *) | ||
570 | (encap_uof_obj->beg_uof + | ||
571 | code_page->neigh_reg_tab_offset); | ||
572 | if (neigh_reg_tab->entry_num) { | ||
573 | pr_err("QAT: UOF can't contain shared control store feature"); | ||
574 | return -EINVAL; | ||
575 | } | ||
576 | if (image->numpages > 1) { | ||
577 | pr_err("QAT: UOF can't contain multiple pages"); | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { | ||
581 | pr_err("QAT: UOF can't use shared control store feature"); | ||
582 | return -EFAULT; | ||
583 | } | ||
584 | if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { | ||
585 | pr_err("QAT: UOF can't use reloadable feature"); | ||
586 | return -EFAULT; | ||
587 | } | ||
588 | return 0; | ||
589 | } | ||
590 | |||
591 | static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj | ||
592 | *encap_uof_obj, | ||
593 | struct icp_qat_uof_image *img, | ||
594 | struct icp_qat_uclo_encap_page *page) | ||
595 | { | ||
596 | struct icp_qat_uof_code_page *code_page; | ||
597 | struct icp_qat_uof_code_area *code_area; | ||
598 | struct icp_qat_uof_objtable *uword_block_tab; | ||
599 | struct icp_qat_uof_uword_block *uwblock; | ||
600 | int i; | ||
601 | |||
602 | code_page = (struct icp_qat_uof_code_page *) | ||
603 | ((char *)img + sizeof(struct icp_qat_uof_image)); | ||
604 | page->def_page = code_page->def_page; | ||
605 | page->page_region = code_page->page_region; | ||
606 | page->beg_addr_v = code_page->beg_addr_v; | ||
607 | page->beg_addr_p = code_page->beg_addr_p; | ||
608 | code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + | ||
609 | code_page->code_area_offset); | ||
610 | page->micro_words_num = code_area->micro_words_num; | ||
611 | uword_block_tab = (struct icp_qat_uof_objtable *) | ||
612 | (encap_uof_obj->beg_uof + | ||
613 | code_area->uword_block_tab); | ||
614 | page->uwblock_num = uword_block_tab->entry_num; | ||
615 | uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab + | ||
616 | sizeof(struct icp_qat_uof_objtable)); | ||
617 | page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; | ||
618 | for (i = 0; i < uword_block_tab->entry_num; i++) | ||
619 | page->uwblock[i].micro_words = | ||
620 | (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset; | ||
621 | } | ||
622 | |||
623 | static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, | ||
624 | struct icp_qat_uclo_encapme *ae_uimage, | ||
625 | int max_image) | ||
626 | { | ||
627 | int i, j; | ||
628 | struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; | ||
629 | struct icp_qat_uof_image *image; | ||
630 | struct icp_qat_uof_objtable *ae_regtab; | ||
631 | struct icp_qat_uof_objtable *init_reg_sym_tab; | ||
632 | struct icp_qat_uof_objtable *sbreak_tab; | ||
633 | struct icp_qat_uof_encap_obj *encap_uof_obj = | ||
634 | &obj_handle->encap_uof_obj; | ||
635 | |||
636 | for (j = 0; j < max_image; j++) { | ||
637 | chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, | ||
638 | ICP_QAT_UOF_IMAG, chunk_hdr); | ||
639 | if (!chunk_hdr) | ||
640 | break; | ||
641 | image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + | ||
642 | chunk_hdr->offset); | ||
643 | ae_regtab = (struct icp_qat_uof_objtable *) | ||
644 | (image->reg_tab_offset + | ||
645 | obj_handle->obj_hdr->file_buff); | ||
646 | ae_uimage[j].ae_reg_num = ae_regtab->entry_num; | ||
647 | ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *) | ||
648 | (((char *)ae_regtab) + | ||
649 | sizeof(struct icp_qat_uof_objtable)); | ||
650 | init_reg_sym_tab = (struct icp_qat_uof_objtable *) | ||
651 | (image->init_reg_sym_tab + | ||
652 | obj_handle->obj_hdr->file_buff); | ||
653 | ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; | ||
654 | ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *) | ||
655 | (((char *)init_reg_sym_tab) + | ||
656 | sizeof(struct icp_qat_uof_objtable)); | ||
657 | sbreak_tab = (struct icp_qat_uof_objtable *) | ||
658 | (image->sbreak_tab + obj_handle->obj_hdr->file_buff); | ||
659 | ae_uimage[j].sbreak_num = sbreak_tab->entry_num; | ||
660 | ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *) | ||
661 | (((char *)sbreak_tab) + | ||
662 | sizeof(struct icp_qat_uof_objtable)); | ||
663 | ae_uimage[j].img_ptr = image; | ||
664 | if (qat_uclo_check_image_compat(encap_uof_obj, image)) | ||
665 | goto out_err; | ||
666 | ae_uimage[j].page = | ||
667 | kzalloc(sizeof(struct icp_qat_uclo_encap_page), | ||
668 | GFP_KERNEL); | ||
669 | if (!ae_uimage[j].page) | ||
670 | goto out_err; | ||
671 | qat_uclo_map_image_page(encap_uof_obj, image, | ||
672 | ae_uimage[j].page); | ||
673 | } | ||
674 | return j; | ||
675 | out_err: | ||
676 | for (i = 0; i < j; i++) | ||
677 | kfree(ae_uimage[i].page); | ||
678 | return 0; | ||
679 | } | ||
680 | |||
681 | static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) | ||
682 | { | ||
683 | int i, ae; | ||
684 | int mflag = 0; | ||
685 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
686 | |||
687 | for (ae = 0; ae <= max_ae; ae++) { | ||
688 | if (!test_bit(ae, | ||
689 | (unsigned long *)&handle->hal_handle->ae_mask)) | ||
690 | continue; | ||
691 | for (i = 0; i < obj_handle->uimage_num; i++) { | ||
692 | if (!test_bit(ae, (unsigned long *) | ||
693 | &obj_handle->ae_uimage[i].img_ptr->ae_assigned)) | ||
694 | continue; | ||
695 | mflag = 1; | ||
696 | if (qat_uclo_init_ae_data(obj_handle, ae, i)) | ||
697 | return -EINVAL; | ||
698 | } | ||
699 | } | ||
700 | if (!mflag) { | ||
701 | pr_err("QAT: uimage uses AE not set"); | ||
702 | return -EINVAL; | ||
703 | } | ||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | static struct icp_qat_uof_strtable * | ||
708 | qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, | ||
709 | char *tab_name, struct icp_qat_uof_strtable *str_table) | ||
710 | { | ||
711 | struct icp_qat_uof_chunkhdr *chunk_hdr; | ||
712 | |||
713 | chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *) | ||
714 | obj_hdr->file_buff, tab_name, NULL); | ||
715 | if (chunk_hdr) { | ||
716 | int hdr_size; | ||
717 | |||
718 | memcpy(&str_table->table_len, obj_hdr->file_buff + | ||
719 | chunk_hdr->offset, sizeof(str_table->table_len)); | ||
720 | hdr_size = (char *)&str_table->strings - (char *)str_table; | ||
721 | str_table->strings = (unsigned long)obj_hdr->file_buff + | ||
722 | chunk_hdr->offset + hdr_size; | ||
723 | return str_table; | ||
724 | } | ||
725 | return NULL; | ||
726 | } | ||
727 | |||
728 | static void | ||
729 | qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, | ||
730 | struct icp_qat_uclo_init_mem_table *init_mem_tab) | ||
731 | { | ||
732 | struct icp_qat_uof_chunkhdr *chunk_hdr; | ||
733 | |||
734 | chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, | ||
735 | ICP_QAT_UOF_IMEM, NULL); | ||
736 | if (chunk_hdr) { | ||
737 | memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof + | ||
738 | chunk_hdr->offset, sizeof(unsigned int)); | ||
739 | init_mem_tab->init_mem = (struct icp_qat_uof_initmem *) | ||
740 | (encap_uof_obj->beg_uof + chunk_hdr->offset + | ||
741 | sizeof(unsigned int)); | ||
742 | } | ||
743 | } | ||
744 | |||
745 | static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) | ||
746 | { | ||
747 | unsigned int maj_ver, prod_type = obj_handle->prod_type; | ||
748 | |||
749 | if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { | ||
750 | pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n", | ||
751 | obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); | ||
752 | return -EINVAL; | ||
753 | } | ||
754 | maj_ver = obj_handle->prod_rev & 0xff; | ||
755 | if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) || | ||
756 | (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) { | ||
757 | pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver); | ||
758 | return -EINVAL; | ||
759 | } | ||
760 | return 0; | ||
761 | } | ||
762 | |||
763 | static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, | ||
764 | unsigned char ae, unsigned char ctx_mask, | ||
765 | enum icp_qat_uof_regtype reg_type, | ||
766 | unsigned short reg_addr, unsigned int value) | ||
767 | { | ||
768 | switch (reg_type) { | ||
769 | case ICP_GPA_ABS: | ||
770 | case ICP_GPB_ABS: | ||
771 | ctx_mask = 0; /* fall through to the REL case */ | ||
772 | case ICP_GPA_REL: | ||
773 | case ICP_GPB_REL: | ||
774 | return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, | ||
775 | reg_addr, value); | ||
776 | case ICP_SR_ABS: | ||
777 | case ICP_DR_ABS: | ||
778 | case ICP_SR_RD_ABS: | ||
779 | case ICP_DR_RD_ABS: | ||
780 | ctx_mask = 0; /* fall through to the REL case */ | ||
781 | case ICP_SR_REL: | ||
782 | case ICP_DR_REL: | ||
783 | case ICP_SR_RD_REL: | ||
784 | case ICP_DR_RD_REL: | ||
785 | return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type, | ||
786 | reg_addr, value); | ||
787 | case ICP_SR_WR_ABS: | ||
788 | case ICP_DR_WR_ABS: | ||
789 | ctx_mask = 0; /* fall through to the REL case */ | ||
790 | case ICP_SR_WR_REL: | ||
791 | case ICP_DR_WR_REL: | ||
792 | return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, | ||
793 | reg_addr, value); | ||
794 | case ICP_NEIGH_REL: | ||
795 | return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); | ||
796 | default: | ||
797 | pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type); | ||
798 | return -EFAULT; | ||
799 | } | ||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, | ||
804 | unsigned int ae, | ||
805 | struct icp_qat_uclo_encapme *encap_ae) | ||
806 | { | ||
807 | unsigned int i; | ||
808 | unsigned char ctx_mask; | ||
809 | struct icp_qat_uof_init_regsym *init_regsym; | ||
810 | |||
811 | if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == | ||
812 | ICP_QAT_UCLO_MAX_CTX) | ||
813 | ctx_mask = 0xff; /* 8-ctx mode: all contexts */ | ||
814 | else | ||
815 | ctx_mask = 0x55; /* 4-ctx mode: even contexts only */ | ||
816 | |||
817 | for (i = 0; i < encap_ae->init_regsym_num; i++) { | ||
818 | unsigned int exp_res; | ||
819 | |||
820 | init_regsym = &encap_ae->init_regsym[i]; | ||
821 | exp_res = init_regsym->value; | ||
822 | switch (init_regsym->init_type) { | ||
823 | case ICP_QAT_UOF_INIT_REG: | ||
824 | qat_uclo_init_reg(handle, ae, ctx_mask, | ||
825 | (enum icp_qat_uof_regtype) | ||
826 | init_regsym->reg_type, | ||
827 | (unsigned short)init_regsym->reg_addr, | ||
828 | exp_res); | ||
829 | break; | ||
830 | case ICP_QAT_UOF_INIT_REG_CTX: | ||
831 | /* check if ctx is appropriate for the ctxMode */ | ||
832 | if (!((1 << init_regsym->ctx) & ctx_mask)) { | ||
833 | pr_err("QAT: invalid ctx num = 0x%x\n", | ||
834 | init_regsym->ctx); | ||
835 | return -EINVAL; | ||
836 | } | ||
837 | qat_uclo_init_reg(handle, ae, | ||
838 | (unsigned char) | ||
839 | (1 << init_regsym->ctx), | ||
840 | (enum icp_qat_uof_regtype) | ||
841 | init_regsym->reg_type, | ||
842 | (unsigned short)init_regsym->reg_addr, | ||
843 | exp_res); | ||
844 | break; | ||
845 | case ICP_QAT_UOF_INIT_EXPR: | ||
846 | pr_err("QAT: INIT_EXPR feature not supported\n"); | ||
847 | return -EINVAL; | ||
848 | case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: | ||
849 | pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n"); | ||
850 | return -EINVAL; | ||
851 | default: | ||
852 | break; | ||
853 | } | ||
854 | } | ||
855 | return 0; | ||
856 | } | ||
857 | |||
858 | static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) | ||
859 | { | ||
860 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
861 | unsigned int s, ae; | ||
862 | |||
863 | if (obj_handle->global_inited) | ||
864 | return 0; | ||
865 | if (obj_handle->init_mem_tab.entry_num) { | ||
866 | if (qat_uclo_init_memory(handle)) { | ||
867 | pr_err("QAT: initialize memory failed\n"); | ||
868 | return -EINVAL; | ||
869 | } | ||
870 | } | ||
871 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
872 | for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { | ||
873 | if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) | ||
874 | continue; | ||
875 | if (qat_uclo_init_reg_sym(handle, ae, | ||
876 | obj_handle->ae_data[ae]. | ||
877 | ae_slices[s].encap_image)) | ||
878 | return -EINVAL; | ||
879 | } | ||
880 | } | ||
881 | obj_handle->global_inited = 1; | ||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) | ||
886 | { | ||
887 | unsigned char ae, nn_mode, s; | ||
888 | struct icp_qat_uof_image *uof_image; | ||
889 | struct icp_qat_uclo_aedata *ae_data; | ||
890 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
891 | |||
892 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
893 | if (!test_bit(ae, | ||
894 | (unsigned long *)&handle->hal_handle->ae_mask)) | ||
895 | continue; | ||
896 | ae_data = &obj_handle->ae_data[ae]; | ||
897 | for (s = 0; s < min_t(unsigned int, ae_data->slice_num, | ||
898 | ICP_QAT_UCLO_MAX_CTX); s++) { | ||
899 | if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) | ||
900 | continue; | ||
901 | uof_image = ae_data->ae_slices[s].encap_image->img_ptr; | ||
902 | if (qat_hal_set_ae_ctx_mode(handle, ae, | ||
903 | (char)ICP_QAT_CTX_MODE | ||
904 | (uof_image->ae_mode))) { | ||
905 | pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); | ||
906 | return -EFAULT; | ||
907 | } | ||
908 | nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); | ||
909 | if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { | ||
910 | pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); | ||
911 | return -EFAULT; | ||
912 | } | ||
913 | if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, | ||
914 | (char)ICP_QAT_LOC_MEM0_MODE | ||
915 | (uof_image->ae_mode))) { | ||
916 | pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); | ||
917 | return -EFAULT; | ||
918 | } | ||
919 | if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, | ||
920 | (char)ICP_QAT_LOC_MEM1_MODE | ||
921 | (uof_image->ae_mode))) { | ||
922 | pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); | ||
923 | return -EFAULT; | ||
924 | } | ||
925 | } | ||
926 | } | ||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) | ||
931 | { | ||
932 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
933 | struct icp_qat_uclo_encapme *image; | ||
934 | int a; | ||
935 | |||
936 | for (a = 0; a < obj_handle->uimage_num; a++) { | ||
937 | image = &obj_handle->ae_uimage[a]; | ||
938 | image->uwords_num = image->page->beg_addr_p + | ||
939 | image->page->micro_words_num; | ||
940 | } | ||
941 | } | ||
942 | |||
943 | static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) | ||
944 | { | ||
945 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
946 | unsigned int ae; | ||
947 | |||
948 | obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), | ||
949 | GFP_KERNEL); | ||
950 | if (!obj_handle->uword_buf) | ||
951 | return -ENOMEM; | ||
952 | obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; | ||
953 | obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) | ||
954 | obj_handle->obj_hdr->file_buff; | ||
955 | obj_handle->uword_in_bytes = 6; | ||
956 | obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; | ||
957 | obj_handle->prod_rev = PID_MAJOR_REV | | ||
958 | (PID_MINOR_REV & handle->hal_handle->revision_id); | ||
959 | if (qat_uclo_check_uof_compat(obj_handle)) { | ||
960 | pr_err("QAT: UOF incompatible\n"); | ||
961 | return -EINVAL; | ||
962 | } | ||
963 | obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; | ||
964 | if (!obj_handle->obj_hdr->file_buff || | ||
965 | !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, | ||
966 | &obj_handle->str_table)) { | ||
967 | pr_err("QAT: UOF has no effective images\n"); | ||
968 | goto out_err; | ||
969 | } | ||
970 | obj_handle->uimage_num = | ||
971 | qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage, | ||
972 | ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); | ||
973 | if (!obj_handle->uimage_num) | ||
974 | goto out_err; | ||
975 | if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { | ||
976 | pr_err("QAT: Bad object\n"); | ||
977 | goto out_check_uof_aemask_err; | ||
978 | } | ||
979 | qat_uclo_init_uword_num(handle); | ||
980 | qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, | ||
981 | &obj_handle->init_mem_tab); | ||
982 | if (qat_uclo_set_ae_mode(handle)) | ||
983 | goto out_check_uof_aemask_err; | ||
984 | return 0; | ||
985 | out_check_uof_aemask_err: | ||
986 | for (ae = 0; ae < obj_handle->uimage_num; ae++) | ||
987 | kfree(obj_handle->ae_uimage[ae].page); | ||
988 | out_err: | ||
989 | kfree(obj_handle->uword_buf); | ||
990 | return -EFAULT; | ||
991 | } | ||
992 | |||
993 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | ||
994 | void *addr_ptr, int mem_size) | ||
995 | { | ||
996 | struct icp_qat_uof_filehdr *filehdr; | ||
997 | struct icp_qat_uclo_objhandle *objhdl; | ||
998 | |||
999 | BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= | ||
1000 | (sizeof(handle->hal_handle->ae_mask) * 8)); | ||
1001 | |||
1002 | if (!handle || !addr_ptr || mem_size < 24) | ||
1003 | return -EINVAL; | ||
1004 | objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); | ||
1005 | if (!objhdl) | ||
1006 | return -ENOMEM; | ||
1007 | objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL); | ||
1008 | if (!objhdl->obj_buf) | ||
1009 | goto out_objbuf_err; | ||
1010 | filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; | ||
1011 | if (qat_uclo_check_format(filehdr)) | ||
1012 | goto out_objhdr_err; | ||
1013 | objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, | ||
1014 | ICP_QAT_UOF_OBJS); | ||
1015 | if (!objhdl->obj_hdr) { | ||
1016 | pr_err("QAT: object file chunk is null\n"); | ||
1017 | goto out_objhdr_err; | ||
1018 | } | ||
1019 | handle->obj_handle = objhdl; | ||
1020 | if (qat_uclo_parse_uof_obj(handle)) | ||
1021 | goto out_overlay_obj_err; | ||
1022 | return 0; | ||
1023 | |||
1024 | out_overlay_obj_err: | ||
1025 | handle->obj_handle = NULL; | ||
1026 | kfree(objhdl->obj_hdr); | ||
1027 | out_objhdr_err: | ||
1028 | kfree(objhdl->obj_buf); | ||
1029 | out_objbuf_err: | ||
1030 | kfree(objhdl); | ||
1031 | return -ENOMEM; | ||
1032 | } | ||
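
The BUILD_BUG_ON at the top of qat_uclo_map_uof_obj pins the relationship between the AE count and the mask width that every test_bit() call in this file relies on. A worked reading (assuming a 32-bit ae_mask, which is what the sizeof arithmetic measures):

	/* sizeof(ae_mask) * 8 == 32 bits, so the assertion demands
	 * ICP_QAT_UCLO_MAX_AE < 32, keeping every AE index 0..MAX_AE-1
	 * addressable as a bit of ae_mask. */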
1033 | |||
1034 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) | ||
1035 | { | ||
1036 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1037 | unsigned int a; | ||
1038 | |||
1039 | if (!obj_handle) | ||
1040 | return; | ||
1041 | |||
1042 | kfree(obj_handle->uword_buf); | ||
1043 | for (a = 0; a < obj_handle->uimage_num; a++) | ||
1044 | kfree(obj_handle->ae_uimage[a].page); | ||
1045 | |||
1046 | for (a = 0; a < handle->hal_handle->ae_max_num; a++) | ||
1047 | qat_uclo_free_ae_data(&obj_handle->ae_data[a]); | ||
1048 | |||
1049 | kfree(obj_handle->obj_hdr); | ||
1050 | kfree(obj_handle->obj_buf); | ||
1051 | kfree(obj_handle); | ||
1052 | handle->obj_handle = NULL; | ||
1053 | } | ||
1054 | |||
1055 | static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, | ||
1056 | struct icp_qat_uclo_encap_page *encap_page, | ||
1057 | uint64_t *uword, unsigned int addr_p, | ||
1058 | unsigned int raddr, uint64_t fill) | ||
1059 | { | ||
1060 | uint64_t uwrd = 0; | ||
1061 | unsigned int i; | ||
1062 | |||
1063 | if (!encap_page) { | ||
1064 | *uword = fill; | ||
1065 | return; | ||
1066 | } | ||
1067 | for (i = 0; i < encap_page->uwblock_num; i++) { | ||
1068 | if (raddr >= encap_page->uwblock[i].start_addr && | ||
1069 | raddr <= encap_page->uwblock[i].start_addr + | ||
1070 | encap_page->uwblock[i].words_num - 1) { | ||
1071 | raddr -= encap_page->uwblock[i].start_addr; | ||
1072 | raddr *= obj_handle->uword_in_bytes; | ||
1073 | memcpy(&uwrd, (void *)(((unsigned long) | ||
1074 | encap_page->uwblock[i].micro_words) + raddr), | ||
1075 | obj_handle->uword_in_bytes); | ||
1076 | uwrd = uwrd & 0xbffffffffffull; | ||
1077 | } | ||
1078 | } | ||
1079 | *uword = uwrd; | ||
1080 | if (*uword == INVLD_UWORD) | ||
1081 | *uword = fill; | ||
1082 | } | ||
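
The block scan above turns a relative microword address into a byte offset using the 6-byte uword encoding set later in qat_uclo_parse_uof_obj. A standalone sketch of that arithmetic (simplified to a single block; little-endian host assumed, as in the memcpy above):

	#include <stdint.h>
	#include <string.h>

	#define UWORD_IN_BYTES 6	/* == obj_handle->uword_in_bytes */

	/* Fetch microword 'raddr' from a block starting at 'start_addr'
	 * with 'words_num' words; returns 0 on a miss (simplified). */
	static uint64_t fetch_uword(const uint8_t *micro_words,
				    unsigned int start_addr,
				    unsigned int words_num,
				    unsigned int raddr)
	{
		uint64_t uwrd = 0;

		if (raddr < start_addr || raddr >= start_addr + words_num)
			return 0;
		memcpy(&uwrd,
		       micro_words + (raddr - start_addr) * UWORD_IN_BYTES,
		       UWORD_IN_BYTES);
		return uwrd & 0xbffffffffffull;	/* same mask as above */
	}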
1083 | |||
1084 | static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, | ||
1085 | struct icp_qat_uclo_encap_page | ||
1086 | *encap_page, unsigned int ae) | ||
1087 | { | ||
1088 | unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; | ||
1089 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1090 | uint64_t fill_pat; | ||
1091 | |||
1092 | /* load the page starting at appropriate ustore address */ | ||
1093 | /* get fill-pattern from an image -- they are all the same */ | ||
1094 | memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, | ||
1095 | sizeof(uint64_t)); | ||
1096 | uw_physical_addr = encap_page->beg_addr_p; | ||
1097 | uw_relative_addr = 0; | ||
1098 | words_num = encap_page->micro_words_num; | ||
1099 | while (words_num) { | ||
1100 | if (words_num < UWORD_CPYBUF_SIZE) | ||
1101 | cpylen = words_num; | ||
1102 | else | ||
1103 | cpylen = UWORD_CPYBUF_SIZE; | ||
1104 | |||
1105 | /* load the buffer */ | ||
1106 | for (i = 0; i < cpylen; i++) | ||
1107 | qat_uclo_fill_uwords(obj_handle, encap_page, | ||
1108 | &obj_handle->uword_buf[i], | ||
1109 | uw_physical_addr + i, | ||
1110 | uw_relative_addr + i, fill_pat); | ||
1111 | |||
1112 | /* copy the buffer to ustore */ | ||
1113 | qat_hal_wr_uwords(handle, (unsigned char)ae, | ||
1114 | uw_physical_addr, cpylen, | ||
1115 | obj_handle->uword_buf); | ||
1116 | |||
1117 | uw_physical_addr += cpylen; | ||
1118 | uw_relative_addr += cpylen; | ||
1119 | words_num -= cpylen; | ||
1120 | } | ||
1121 | } | ||
1122 | |||
1123 | static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, | ||
1124 | struct icp_qat_uof_image *image) | ||
1125 | { | ||
1126 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1127 | unsigned int ctx_mask, s; | ||
1128 | struct icp_qat_uclo_page *page; | ||
1129 | unsigned char ae; | ||
1130 | int ctx; | ||
1131 | |||
1132 | if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) | ||
1133 | ctx_mask = 0xff; | ||
1134 | else | ||
1135 | ctx_mask = 0x55; | ||
1136 | /* load the default page and set assigned CTX PC | ||
1137 | * to the entrypoint address */ | ||
1138 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
1139 | if (!test_bit(ae, (unsigned long *)&image->ae_assigned)) | ||
1140 | continue; | ||
1141 | /* find the slice to which this image is assigned */ | ||
1142 | for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { | ||
1143 | if (image->ctx_assigned & obj_handle->ae_data[ae]. | ||
1144 | ae_slices[s].ctx_mask_assigned) | ||
1145 | break; | ||
1146 | } | ||
1147 | if (s >= obj_handle->ae_data[ae].slice_num) | ||
1148 | continue; | ||
1149 | page = obj_handle->ae_data[ae].ae_slices[s].page; | ||
1150 | if (!page->encap_page->def_page) | ||
1151 | continue; | ||
1152 | qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); | ||
1153 | |||
1154 | page = obj_handle->ae_data[ae].ae_slices[s].page; | ||
1155 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) | ||
1156 | obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] = | ||
1157 | (ctx_mask & (1 << ctx)) ? page : NULL; | ||
1158 | qat_hal_set_live_ctx(handle, (unsigned char)ae, | ||
1159 | image->ctx_assigned); | ||
1160 | qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned, | ||
1161 | image->entry_address); | ||
1162 | } | ||
1163 | } | ||
1164 | |||
1165 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) | ||
1166 | { | ||
1167 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1168 | unsigned int i; | ||
1169 | |||
1170 | if (qat_uclo_init_globals(handle)) | ||
1171 | return -EINVAL; | ||
1172 | for (i = 0; i < obj_handle->uimage_num; i++) { | ||
1173 | if (!obj_handle->ae_uimage[i].img_ptr) | ||
1174 | return -EINVAL; | ||
1175 | if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) | ||
1176 | return -EINVAL; | ||
1177 | qat_uclo_wr_uimage_page(handle, | ||
1178 | obj_handle->ae_uimage[i].img_ptr); | ||
1179 | } | ||
1180 | return 0; | ||
1181 | } | ||
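
Taken together, the exported entry points form a map/write/teardown sequence. A hedged sketch of a caller (error handling trimmed; the UOF image is assumed to already sit in a kernel buffer):

	/* Sketch only: drive the UOF loader for one device handle. */
	static int load_uof(struct icp_qat_fw_loader_handle *handle,
			    void *uof_buf, int uof_size)
	{
		int ret;

		ret = qat_uclo_map_uof_obj(handle, uof_buf, uof_size);
		if (ret)
			return ret;
		ret = qat_uclo_wr_all_uimage(handle);
		if (ret)
			qat_uclo_del_uof_obj(handle);
		return ret;
	}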
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile new file mode 100644 index 000000000000..25171c557043 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | ccflags-y := -I$(src)/../qat_common | ||
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o | ||
3 | qat_dh895xcc-objs := adf_drv.o \ | ||
4 | adf_isr.o \ | ||
5 | adf_dh895xcc_hw_data.o \ | ||
6 | adf_hw_arbiter.o \ | ||
7 | qat_admin.o \ | ||
8 | adf_admin.o | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c new file mode 100644 index 000000000000..978d6c56639d --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/types.h> | ||
48 | #include <linux/mutex.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/delay.h> | ||
51 | #include <linux/pci.h> | ||
52 | #include <linux/dma-mapping.h> | ||
53 | #include <adf_accel_devices.h> | ||
54 | #include "adf_drv.h" | ||
55 | #include "adf_dh895xcc_hw_data.h" | ||
56 | |||
57 | #define ADF_ADMINMSG_LEN 32 | ||
58 | |||
59 | struct adf_admin_comms { | ||
60 | dma_addr_t phy_addr; | ||
61 | void *virt_addr; | ||
62 | void __iomem *mailbox_addr; | ||
63 | struct mutex lock; /* protects adf_admin_comms struct */ | ||
64 | }; | ||
65 | |||
66 | int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, | ||
67 | uint32_t ae, void *in, void *out) | ||
68 | { | ||
69 | struct adf_admin_comms *admin = accel_dev->admin; | ||
70 | int offset = ae * ADF_ADMINMSG_LEN * 2; | ||
71 | void __iomem *mailbox = admin->mailbox_addr; | ||
72 | int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; | ||
73 | int times, received; | ||
74 | |||
75 | mutex_lock(&admin->lock); | ||
76 | |||
77 | if (ADF_CSR_RD(mailbox, mb_offset) == 1) { | ||
78 | mutex_unlock(&admin->lock); | ||
79 | return -EAGAIN; | ||
80 | } | ||
81 | |||
82 | memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); | ||
83 | ADF_CSR_WR(mailbox, mb_offset, 1); | ||
84 | received = 0; | ||
85 | for (times = 0; times < 50; times++) { | ||
86 | msleep(20); | ||
87 | if (ADF_CSR_RD(mailbox, mb_offset) == 0) { | ||
88 | received = 1; | ||
89 | break; | ||
90 | } | ||
91 | } | ||
92 | if (received) | ||
93 | memcpy(out, admin->virt_addr + offset + | ||
94 | ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); | ||
95 | else | ||
96 | pr_err("QAT: Failed to send admin msg to accelerator\n"); | ||
97 | |||
98 | mutex_unlock(&admin->lock); | ||
99 | return received ? 0 : -EFAULT; | ||
100 | } | ||
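
The handshake above gives each AE a 64-byte slot in the DMA buffer (32-byte request immediately followed by the 32-byte response) and polls the per-AE mailbox CSR for up to 50 * 20 ms = 1 s. A minimal sketch of a caller (the message layout is hypothetical, not the firmware's real format):

	struct fake_admin_msg {		/* hypothetical 32-byte message */
		uint16_t cmd;
		uint8_t pad[30];
	};

	static int send_admin_cmd(struct adf_accel_dev *accel_dev,
				  uint32_t ae, uint16_t cmd)
	{
		struct fake_admin_msg in = { .cmd = cmd }, out = { 0 };

		/* Blocks up to ~1 s; -EAGAIN means the mailbox was busy. */
		return adf_put_admin_msg_sync(accel_dev, ae, &in, &out);
	}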
101 | |||
102 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev) | ||
103 | { | ||
104 | struct adf_admin_comms *admin; | ||
105 | struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; | ||
106 | void __iomem *csr = pmisc->virt_addr; | ||
107 | void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; | ||
108 | uint64_t reg_val; | ||
109 | |||
110 | admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, | ||
111 | accel_dev->numa_node); | ||
112 | if (!admin) | ||
113 | return -ENOMEM; | ||
114 | admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
115 | &admin->phy_addr, GFP_KERNEL); | ||
116 | if (!admin->virt_addr) { | ||
117 | dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buffer\n"); | ||
118 | kfree(admin); | ||
119 | return -ENOMEM; | ||
120 | } | ||
121 | reg_val = (uint64_t)admin->phy_addr; | ||
122 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); | ||
123 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); | ||
124 | mutex_init(&admin->lock); | ||
125 | admin->mailbox_addr = mailbox; | ||
126 | accel_dev->admin = admin; | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) | ||
131 | { | ||
132 | struct adf_admin_comms *admin = accel_dev->admin; | ||
133 | |||
134 | if (!admin) | ||
135 | return; | ||
136 | |||
137 | if (admin->virt_addr) | ||
138 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
139 | admin->virt_addr, admin->phy_addr); | ||
140 | |||
141 | mutex_destroy(&admin->lock); | ||
142 | kfree(admin); | ||
143 | accel_dev->admin = NULL; | ||
144 | } | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c new file mode 100644 index 000000000000..ef05825cc651 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <adf_accel_devices.h> | ||
48 | #include "adf_dh895xcc_hw_data.h" | ||
49 | #include "adf_drv.h" | ||
50 | |||
51 | /* Worker thread to service arbiter mappings based on dev SKUs */ | ||
52 | static const uint32_t thrd_to_arb_map_sku4[] = { | ||
53 | 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, | ||
54 | 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, | ||
55 | 0x00000000, 0x00000000, 0x00000000, 0x00000000 | ||
56 | }; | ||
57 | |||
58 | static const uint32_t thrd_to_arb_map_sku6[] = { | ||
59 | 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, | ||
60 | 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, | ||
61 | 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222 | ||
62 | }; | ||
63 | |||
64 | static struct adf_hw_device_class dh895xcc_class = { | ||
65 | .name = ADF_DH895XCC_DEVICE_NAME, | ||
66 | .type = DEV_DH895XCC, | ||
67 | .instances = 0 | ||
68 | }; | ||
69 | |||
70 | static uint32_t get_accel_mask(uint32_t fuse) | ||
71 | { | ||
72 | return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & | ||
73 | ADF_DH895XCC_ACCELERATORS_MASK; | ||
74 | } | ||
75 | |||
76 | static uint32_t get_ae_mask(uint32_t fuse) | ||
77 | { | ||
78 | return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK; | ||
79 | } | ||
80 | |||
81 | static uint32_t get_num_accels(struct adf_hw_device_data *self) | ||
82 | { | ||
83 | uint32_t i, ctr = 0; | ||
84 | |||
85 | if (!self || !self->accel_mask) | ||
86 | return 0; | ||
87 | |||
88 | for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) { | ||
89 | if (self->accel_mask & (1 << i)) | ||
90 | ctr++; | ||
91 | } | ||
92 | return ctr; | ||
93 | } | ||
94 | |||
95 | static uint32_t get_num_aes(struct adf_hw_device_data *self) | ||
96 | { | ||
97 | uint32_t i, ctr = 0; | ||
98 | |||
99 | if (!self || !self->ae_mask) | ||
100 | return 0; | ||
101 | |||
102 | for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) { | ||
103 | if (self->ae_mask & (1 << i)) | ||
104 | ctr++; | ||
105 | } | ||
106 | return ctr; | ||
107 | } | ||
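
Both counters above are plain population counts over a fixed-width, fuse-derived mask; an equivalent one-liner (a sketch, assuming the stock kernel helper hweight32 from <linux/bitops.h>):

	/* Equivalent bit count, restricted to the architected accel bits. */
	static uint32_t count_accels(uint32_t accel_mask)
	{
		return hweight32(accel_mask & ADF_DH895XCC_ACCELERATORS_MASK);
	}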
108 | |||
109 | static uint32_t get_misc_bar_id(struct adf_hw_device_data *self) | ||
110 | { | ||
111 | return ADF_DH895XCC_PMISC_BAR; | ||
112 | } | ||
113 | |||
114 | static uint32_t get_etr_bar_id(struct adf_hw_device_data *self) | ||
115 | { | ||
116 | return ADF_DH895XCC_ETR_BAR; | ||
117 | } | ||
118 | |||
119 | static enum dev_sku_info get_sku(struct adf_hw_device_data *self) | ||
120 | { | ||
121 | int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) | ||
122 | >> ADF_DH895XCC_FUSECTL_SKU_SHIFT; | ||
123 | |||
124 | switch (sku) { | ||
125 | case ADF_DH895XCC_FUSECTL_SKU_1: | ||
126 | return DEV_SKU_1; | ||
127 | case ADF_DH895XCC_FUSECTL_SKU_2: | ||
128 | return DEV_SKU_2; | ||
129 | case ADF_DH895XCC_FUSECTL_SKU_3: | ||
130 | return DEV_SKU_3; | ||
131 | case ADF_DH895XCC_FUSECTL_SKU_4: | ||
132 | return DEV_SKU_4; | ||
133 | default: | ||
134 | return DEV_SKU_UNKNOWN; | ||
135 | } | ||
136 | return DEV_SKU_UNKNOWN; | ||
137 | } | ||
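
The SKU decode is a two-bit field extraction from the fuse register. A worked example with the masks defined in adf_dh895xcc_hw_data.h:

	/* fuses = 0x00100000:
	 * (0x00100000 & ADF_DH895XCC_FUSECTL_SKU_MASK) >> 20
	 *   = (0x00100000 & 0x300000) >> 20 = 0x1
	 *   -> ADF_DH895XCC_FUSECTL_SKU_2 -> DEV_SKU_2 */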
138 | |||
139 | void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | ||
140 | uint32_t const **arb_map_config) | ||
141 | { | ||
142 | switch (accel_dev->accel_pci_dev.sku) { | ||
143 | case DEV_SKU_1: | ||
144 | *arb_map_config = thrd_to_arb_map_sku4; | ||
145 | break; | ||
146 | |||
147 | case DEV_SKU_2: | ||
148 | case DEV_SKU_4: | ||
149 | *arb_map_config = thrd_to_arb_map_sku6; | ||
150 | break; | ||
151 | default: | ||
152 | pr_err("QAT: The configuration doesn't match any SKU\n"); | ||
153 | *arb_map_config = NULL; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) | ||
158 | { | ||
159 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
160 | struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; | ||
161 | void __iomem *csr = misc_bar->virt_addr; | ||
162 | unsigned int val, i; | ||
163 | |||
164 | /* Enable Accel Engine error detection & correction */ | ||
165 | for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { | ||
166 | val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i)); | ||
167 | val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR; | ||
168 | ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val); | ||
169 | val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i)); | ||
170 | val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR; | ||
171 | ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val); | ||
172 | } | ||
173 | |||
174 | /* Enable shared memory error detection & correction */ | ||
175 | for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { | ||
176 | val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i)); | ||
177 | val |= ADF_DH895XCC_ERRSSMSH_EN; | ||
178 | ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val); | ||
179 | val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i)); | ||
180 | val |= ADF_DH895XCC_ERRSSMSH_EN; | ||
181 | ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val); | ||
182 | } | ||
183 | } | ||
184 | |||
185 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | ||
186 | { | ||
187 | hw_data->dev_class = &dh895xcc_class; | ||
188 | hw_data->instance_id = dh895xcc_class.instances++; | ||
189 | hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS; | ||
190 | hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS; | ||
191 | hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID; | ||
192 | hw_data->num_logical_accel = 1; | ||
193 | hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES; | ||
194 | hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET; | ||
195 | hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK; | ||
196 | hw_data->alloc_irq = adf_isr_resource_alloc; | ||
197 | hw_data->free_irq = adf_isr_resource_free; | ||
198 | hw_data->enable_error_correction = adf_enable_error_correction; | ||
199 | hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable; | ||
200 | hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable; | ||
201 | hw_data->get_accel_mask = get_accel_mask; | ||
202 | hw_data->get_ae_mask = get_ae_mask; | ||
203 | hw_data->get_num_accels = get_num_accels; | ||
204 | hw_data->get_num_aes = get_num_aes; | ||
205 | hw_data->get_etr_bar_id = get_etr_bar_id; | ||
206 | hw_data->get_misc_bar_id = get_misc_bar_id; | ||
207 | hw_data->get_sku = get_sku; | ||
208 | hw_data->fw_name = ADF_DH895XCC_FW; | ||
209 | } | ||
210 | |||
211 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | ||
212 | { | ||
213 | hw_data->dev_class->instances--; | ||
214 | } | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h new file mode 100644 index 000000000000..b707f292b377 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_DH895x_HW_DATA_H_ | ||
48 | #define ADF_DH895x_HW_DATA_H_ | ||
49 | |||
50 | /* PCIe configuration space */ | ||
51 | #define ADF_DH895XCC_RX_RINGS_OFFSET 8 | ||
52 | #define ADF_DH895XCC_TX_RINGS_MASK 0xFF | ||
53 | #define ADF_DH895XCC_FUSECTL_OFFSET 0x40 | ||
54 | #define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000 | ||
55 | #define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20 | ||
56 | #define ADF_DH895XCC_FUSECTL_SKU_1 0x0 | ||
57 | #define ADF_DH895XCC_FUSECTL_SKU_2 0x1 | ||
58 | #define ADF_DH895XCC_FUSECTL_SKU_3 0x2 | ||
59 | #define ADF_DH895XCC_FUSECTL_SKU_4 0x3 | ||
60 | #define ADF_DH895XCC_MAX_ACCELERATORS 6 | ||
61 | #define ADF_DH895XCC_MAX_ACCELENGINES 12 | ||
62 | #define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13 | ||
63 | #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F | ||
64 | #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF | ||
65 | #define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C | ||
66 | #define ADF_DH895XCC_ETR_MAX_BANKS 32 | ||
67 | #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) | ||
68 | #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) | ||
69 | #define ADF_DH895XCC_SMIA0_MASK 0xFFFF | ||
70 | #define ADF_DH895XCC_SMIA1_MASK 0x1 | ||
71 | /* Error detection and correction */ | ||
72 | #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) | ||
73 | #define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) | ||
74 | #define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28) | ||
75 | #define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12) | ||
76 | #define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18) | ||
77 | #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) | ||
78 | #define ADF_DH895XCC_ERRSSMSH_EN (1 << 3) | ||
79 | |||
80 | /* Admin Messages Registers */ | ||
81 | #define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) | ||
82 | #define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) | ||
83 | #define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 | ||
84 | #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 | ||
85 | #define ADF_DH895XCC_FW "qat_895xcc.bin" | ||
86 | #endif | ||
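
The per-unit register macros in this header are simple base-plus-stride computations. Two worked expansions using the values defined above:

	/* ADF_DH895XCC_AE_CTX_ENABLES(2) = 2 * 0x1000 + 0x20818 = 0x22818
	 * ADF_DH895XCC_UERRSSMSH(3)     = 3 * 0x4000 + 0x18    = 0xc018 */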
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c new file mode 100644 index 000000000000..0d0435a41be9 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/types.h> | ||
52 | #include <linux/fs.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/errno.h> | ||
55 | #include <linux/device.h> | ||
56 | #include <linux/dma-mapping.h> | ||
57 | #include <linux/platform_device.h> | ||
58 | #include <linux/workqueue.h> | ||
59 | #include <linux/io.h> | ||
60 | #include <adf_accel_devices.h> | ||
61 | #include <adf_common_drv.h> | ||
62 | #include <adf_cfg.h> | ||
63 | #include <adf_transport_access_macros.h> | ||
64 | #include "adf_dh895xcc_hw_data.h" | ||
65 | #include "adf_drv.h" | ||
66 | |||
67 | static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME; | ||
68 | |||
69 | #define ADF_SYSTEM_DEVICE(device_id) \ | ||
70 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | ||
71 | |||
72 | static const struct pci_device_id adf_pci_tbl[] = { | ||
73 | ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID), | ||
74 | {0,} | ||
75 | }; | ||
76 | MODULE_DEVICE_TABLE(pci, adf_pci_tbl); | ||
77 | |||
78 | static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); | ||
79 | static void adf_remove(struct pci_dev *dev); | ||
80 | |||
81 | static struct pci_driver adf_driver = { | ||
82 | .id_table = adf_pci_tbl, | ||
83 | .name = adf_driver_name, | ||
84 | .probe = adf_probe, | ||
85 | .remove = adf_remove | ||
86 | }; | ||
87 | |||
88 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | ||
89 | { | ||
90 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; | ||
91 | int i; | ||
92 | |||
93 | adf_exit_admin_comms(accel_dev); | ||
94 | adf_exit_arb(accel_dev); | ||
95 | adf_cleanup_etr_data(accel_dev); | ||
96 | |||
97 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | ||
98 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | ||
99 | |||
100 | if (bar->virt_addr) | ||
101 | pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); | ||
102 | } | ||
103 | |||
104 | if (accel_dev->hw_device) { | ||
105 | switch (accel_dev->hw_device->pci_dev_id) { | ||
106 | case ADF_DH895XCC_PCI_DEVICE_ID: | ||
107 | adf_clean_hw_data_dh895xcc(accel_dev->hw_device); | ||
108 | break; | ||
109 | default: | ||
110 | break; | ||
111 | } | ||
112 | kfree(accel_dev->hw_device); | ||
113 | } | ||
114 | adf_cfg_dev_remove(accel_dev); | ||
115 | debugfs_remove(accel_dev->debugfs_dir); | ||
116 | adf_devmgr_rm_dev(accel_dev); | ||
117 | pci_release_regions(accel_pci_dev->pci_dev); | ||
118 | pci_disable_device(accel_pci_dev->pci_dev); | ||
119 | kfree(accel_dev); | ||
120 | } | ||
121 | |||
122 | static uint8_t adf_get_dev_node_id(struct pci_dev *pdev) | ||
123 | { | ||
124 | unsigned int bus_per_cpu = 0; | ||
125 | struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1); | ||
126 | |||
127 | if (!c->phys_proc_id) | ||
128 | return 0; | ||
129 | |||
130 | bus_per_cpu = 256 / (c->phys_proc_id + 1); | ||
131 | |||
132 | if (bus_per_cpu != 0) | ||
133 | return pdev->bus->number / bus_per_cpu; | ||
134 | return 0; | ||
135 | } | ||
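
The heuristic above splits the 256 possible PCI bus numbers evenly across packages, taking the phys_proc_id of the last online CPU as the highest package id. A worked example under that assumption:

	/* Two-socket box: phys_proc_id == 1 on the last CPU, so
	 * bus_per_cpu = 256 / (1 + 1) = 128; a device on bus 0x85 (133)
	 * lands on node 133 / 128 = 1. */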
136 | |||
137 | static int qat_dev_start(struct adf_accel_dev *accel_dev) | ||
138 | { | ||
139 | int cpus = num_online_cpus(); | ||
140 | int banks = GET_MAX_BANKS(accel_dev); | ||
141 | int instances = min(cpus, banks); | ||
142 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
143 | int i; | ||
144 | unsigned long val; | ||
145 | |||
146 | if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) | ||
147 | goto err; | ||
148 | if (adf_cfg_section_add(accel_dev, "Accelerator0")) | ||
149 | goto err; | ||
150 | for (i = 0; i < instances; i++) { | ||
151 | val = i; | ||
152 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); | ||
153 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
154 | key, (void *)&val, ADF_DEC)) | ||
155 | goto err; | ||
156 | |||
157 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, | ||
158 | i); | ||
159 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
160 | key, (void *)&val, ADF_DEC)) | ||
161 | goto err; | ||
162 | |||
163 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); | ||
164 | val = 128; | ||
165 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
166 | key, (void *)&val, ADF_DEC)) | ||
167 | goto err; | ||
168 | |||
169 | val = 512; | ||
170 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); | ||
171 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
172 | key, (void *)&val, ADF_DEC)) | ||
173 | goto err; | ||
174 | |||
175 | val = 0; | ||
176 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); | ||
177 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
178 | key, (void *)&val, ADF_DEC)) | ||
179 | goto err; | ||
180 | |||
181 | val = 2; | ||
182 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); | ||
183 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
184 | key, (void *)&val, ADF_DEC)) | ||
185 | goto err; | ||
186 | |||
187 | val = 4; | ||
188 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); | ||
189 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
190 | key, (void *)&val, ADF_DEC)) | ||
191 | goto err; | ||
192 | |||
193 | val = 8; | ||
194 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); | ||
195 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
196 | key, (void *)&val, ADF_DEC)) | ||
197 | goto err; | ||
198 | |||
199 | val = 10; | ||
200 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); | ||
201 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
202 | key, (void *)&val, ADF_DEC)) | ||
203 | goto err; | ||
204 | |||
205 | val = 12; | ||
206 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); | ||
207 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
208 | key, (void *)&val, ADF_DEC)) | ||
209 | goto err; | ||
210 | |||
211 | val = ADF_COALESCING_DEF_TIME; | ||
212 | snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); | ||
213 | if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", | ||
214 | key, (void *)&val, ADF_DEC)) | ||
215 | goto err; | ||
216 | } | ||
217 | |||
218 | val = i; | ||
219 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
220 | ADF_NUM_CY, (void *)&val, ADF_DEC)) | ||
221 | goto err; | ||
222 | |||
223 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
224 | return adf_dev_start(accel_dev); | ||
225 | err: | ||
226 | dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); | ||
227 | return -EINVAL; | ||
228 | } | ||
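
The ring numbers written above encode the tx/rx pairing that adf_init_hw_data_dh895xcc sets via tx_rx_gap (ADF_DH895XCC_RX_RINGS_OFFSET == 8): every RX ring sits eight slots above its TX partner.

	/* pairing implied by the values above (tx_rx_gap == 8):
	 *   asym: tx 0 -> rx 0 + 8 = 8
	 *   sym:  tx 2 -> rx 2 + 8 = 10
	 *   rnd:  tx 4 -> rx 4 + 8 = 12 */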
229 | |||
230 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
231 | { | ||
232 | struct adf_accel_dev *accel_dev; | ||
233 | struct adf_accel_pci *accel_pci_dev; | ||
234 | struct adf_hw_device_data *hw_data; | ||
235 | void __iomem *pmisc_bar_addr = NULL; | ||
236 | char name[ADF_DEVICE_NAME_LENGTH]; | ||
237 | unsigned int i, bar_nr; | ||
238 | uint8_t node; | ||
239 | int ret; | ||
240 | |||
241 | switch (ent->device) { | ||
242 | case ADF_DH895XCC_PCI_DEVICE_ID: | ||
243 | break; | ||
244 | default: | ||
245 | dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); | ||
246 | return -ENODEV; | ||
247 | } | ||
248 | |||
249 | node = adf_get_dev_node_id(pdev); | ||
250 | accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node); | ||
251 | if (!accel_dev) | ||
252 | return -ENOMEM; | ||
253 | |||
254 | accel_dev->numa_node = node; | ||
255 | INIT_LIST_HEAD(&accel_dev->crypto_list); | ||
256 | |||
257 | /* Add accel device to accel table. | ||
258 | * This should be called before adf_cleanup_accel is called */ | ||
259 | if (adf_devmgr_add_dev(accel_dev)) { | ||
260 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); | ||
261 | kfree(accel_dev); | ||
262 | return -EFAULT; | ||
263 | } | ||
264 | |||
265 | accel_dev->owner = THIS_MODULE; | ||
266 | /* Allocate and configure device configuration structure */ | ||
267 | hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node); | ||
268 | if (!hw_data) { | ||
269 | ret = -ENOMEM; | ||
270 | goto out_err; | ||
271 | } | ||
272 | |||
273 | accel_dev->hw_device = hw_data; | ||
274 | switch (ent->device) { | ||
275 | case ADF_DH895XCC_PCI_DEVICE_ID: | ||
276 | adf_init_hw_data_dh895xcc(accel_dev->hw_device); | ||
277 | break; | ||
278 | default: | ||
279 | ret = -ENODEV; goto out_err; | ||
280 | } | ||
281 | accel_pci_dev = &accel_dev->accel_pci_dev; | ||
282 | pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); | ||
283 | pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, | ||
284 | &hw_data->fuses); | ||
285 | |||
286 | /* Get Accelerators and Accelerators Engines masks */ | ||
287 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); | ||
288 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); | ||
289 | accel_pci_dev->sku = hw_data->get_sku(hw_data); | ||
290 | accel_pci_dev->pci_dev = pdev; | ||
291 | /* If the device has no acceleration engines then ignore it. */ | ||
292 | if (!hw_data->accel_mask || !hw_data->ae_mask || | ||
293 | ((~hw_data->ae_mask) & 0x01)) { | ||
294 | dev_err(&pdev->dev, "No acceleration units found\n"); | ||
295 | ret = -EFAULT; | ||
296 | goto out_err; | ||
297 | } | ||
298 | |||
299 | /* Create dev top level debugfs entry */ | ||
300 | snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX, | ||
301 | hw_data->dev_class->name, hw_data->instance_id); | ||
302 | accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); | ||
303 | if (!accel_dev->debugfs_dir) { | ||
304 | dev_err(&pdev->dev, "Could not create debugfs dir\n"); | ||
305 | ret = -EINVAL; | ||
306 | goto out_err; | ||
307 | } | ||
308 | |||
309 | /* Create device configuration table */ | ||
310 | ret = adf_cfg_dev_add(accel_dev); | ||
311 | if (ret) | ||
312 | goto out_err; | ||
313 | |||
314 | /* enable PCI device */ | ||
315 | if (pci_enable_device(pdev)) { | ||
316 | ret = -EFAULT; | ||
317 | goto out_err; | ||
318 | } | ||
319 | |||
320 | /* set dma identifier */ | ||
321 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
322 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | ||
323 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | ||
324 | ret = -EFAULT; | ||
325 | goto out_err; | ||
326 | } else { | ||
327 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
328 | } | ||
329 | |||
330 | } else { | ||
331 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
332 | } | ||
333 | |||
334 | if (pci_request_regions(pdev, adf_driver_name)) { | ||
335 | ret = -EFAULT; | ||
336 | goto out_err; | ||
337 | } | ||
338 | |||
339 | /* Read accelerator capabilities mask */ | ||
340 | pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET, | ||
341 | &hw_data->accel_capabilities_mask); | ||
342 | |||
343 | /* Find and map all the device's BARS */ | ||
344 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | ||
345 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | ||
346 | |||
347 | bar_nr = i * 2; | ||
348 | bar->base_addr = pci_resource_start(pdev, bar_nr); | ||
349 | if (!bar->base_addr) | ||
350 | break; | ||
351 | bar->size = pci_resource_len(pdev, bar_nr); | ||
352 | bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); | ||
353 | if (!bar->virt_addr) { | ||
354 | dev_err(&pdev->dev, "Failed to map BAR %d\n", i); | ||
355 | ret = -EFAULT; | ||
356 | goto out_err; | ||
357 | } | ||
358 | if (i == ADF_DH895XCC_PMISC_BAR) | ||
359 | pmisc_bar_addr = bar->virt_addr; | ||
360 | } | ||
361 | pci_set_master(pdev); | ||
362 | |||
363 | if (adf_enable_aer(accel_dev, &adf_driver)) { | ||
364 | dev_err(&pdev->dev, "Failed to enable aer\n"); | ||
365 | ret = -EFAULT; | ||
366 | goto out_err; | ||
367 | } | ||
368 | |||
369 | if (adf_init_etr_data(accel_dev)) { | ||
370 | dev_err(&pdev->dev, "Failed to initialize etr\n"); | ||
371 | ret = -EFAULT; | ||
372 | goto out_err; | ||
373 | } | ||
374 | |||
375 | if (adf_init_admin_comms(accel_dev)) { | ||
376 | dev_err(&pdev->dev, "Failed to initialize admin comms\n"); | ||
377 | ret = -EFAULT; | ||
378 | goto out_err; | ||
379 | } | ||
380 | |||
381 | if (adf_init_arb(accel_dev)) { | ||
382 | dev_err(&pdev->dev, "Failed to initialize hw arbiter\n"); | ||
383 | ret = -EFAULT; | ||
384 | goto out_err; | ||
385 | } | ||
386 | if (pci_save_state(pdev)) { | ||
387 | dev_err(&pdev->dev, "Failed to save pci state\n"); | ||
388 | ret = -ENOMEM; | ||
389 | goto out_err; | ||
390 | } | ||
391 | |||
392 | /* Enable bundle and misc interrupts */ | ||
393 | ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, | ||
394 | ADF_DH895XCC_SMIA0_MASK); | ||
395 | ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, | ||
396 | ADF_DH895XCC_SMIA1_MASK); | ||
397 | |||
398 | ret = qat_dev_start(accel_dev); | ||
399 | if (ret) { | ||
400 | adf_dev_stop(accel_dev); | ||
401 | goto out_err; | ||
402 | } | ||
403 | |||
404 | return 0; | ||
405 | out_err: | ||
406 | adf_cleanup_accel(accel_dev); | ||
407 | return ret; | ||
408 | } | ||
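
The probe's DMA setup tries a 64-bit streaming mask, falls back to 32-bit, and keeps the coherent mask in step. On kernels that provide dma_set_mask_and_coherent() the same fallback is usually written as one helper (a sketch, not the code above):

	#include <linux/dma-mapping.h>

	static int set_dma_masks(struct pci_dev *pdev)
	{
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}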
409 | |||
410 | static void adf_remove(struct pci_dev *pdev) | ||
411 | { | ||
412 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
413 | |||
414 | if (!accel_dev) { | ||
415 | pr_err("QAT: Driver removal failed\n"); | ||
416 | return; | ||
417 | } | ||
418 | if (adf_dev_stop(accel_dev)) | ||
419 | dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); | ||
420 | adf_disable_aer(accel_dev); | ||
421 | adf_cleanup_accel(accel_dev); | ||
422 | } | ||
423 | |||
424 | static int __init adfdrv_init(void) | ||
425 | { | ||
426 | request_module("intel_qat"); | ||
427 | if (qat_admin_register()) | ||
428 | return -EFAULT; | ||
429 | |||
430 | if (pci_register_driver(&adf_driver)) { | ||
431 | pr_err("QAT: Driver initialization failed\n"); | ||
432 | return -EFAULT; | ||
433 | } | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | static void __exit adfdrv_release(void) | ||
438 | { | ||
439 | pci_unregister_driver(&adf_driver); | ||
440 | qat_admin_unregister(); | ||
441 | } | ||
442 | |||
443 | module_init(adfdrv_init); | ||
444 | module_exit(adfdrv_release); | ||
445 | |||
446 | MODULE_LICENSE("Dual BSD/GPL"); | ||
447 | MODULE_AUTHOR("Intel"); | ||
448 | MODULE_FIRMWARE("qat_895xcc.bin"); | ||
449 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h new file mode 100644 index 000000000000..a2fbb6ce75cd --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_DH895x_DRV_H_ | ||
48 | #define ADF_DH895x_DRV_H_ | ||
49 | #include <adf_accel_devices.h> | ||
50 | #include <adf_transport.h> | ||
51 | |||
52 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | ||
53 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | ||
54 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); | ||
55 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev); | ||
56 | void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); | ||
57 | void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | ||
58 | uint32_t const **arb_map_config); | ||
59 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev); | ||
60 | void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); | ||
61 | int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, | ||
62 | uint32_t ae, void *in, void *out); | ||
63 | int qat_admin_register(void); | ||
64 | int qat_admin_unregister(void); | ||
65 | int adf_init_arb(struct adf_accel_dev *accel_dev); | ||
66 | void adf_exit_arb(struct adf_accel_dev *accel_dev); | ||
67 | #endif | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c new file mode 100644 index 000000000000..1864bdb36f8f --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <adf_accel_devices.h> | ||
48 | #include <adf_transport_internal.h> | ||
49 | #include "adf_drv.h" | ||
50 | |||
51 | #define ADF_ARB_NUM 4 | ||
52 | #define ADF_ARB_REQ_RING_NUM 8 | ||
53 | #define ADF_ARB_REG_SIZE 0x4 | ||
54 | #define ADF_ARB_WTR_SIZE 0x20 | ||
55 | #define ADF_ARB_OFFSET 0x30000 | ||
56 | #define ADF_ARB_REG_SLOT 0x1000 | ||
57 | #define ADF_ARB_WTR_OFFSET 0x010 | ||
58 | #define ADF_ARB_RO_EN_OFFSET 0x090 | ||
59 | #define ADF_ARB_WQCFG_OFFSET 0x100 | ||
60 | #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 | ||
61 | #define ADF_ARB_WRK_2_SER_MAP 10 | ||
62 | #define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C | ||
63 | |||
64 | #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ | ||
65 | ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ | ||
66 | (ADF_ARB_REG_SLOT * index), value) | ||
67 | |||
68 | #define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \ | ||
69 | ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ | ||
70 | ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value) | ||
71 | |||
72 | #define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \ | ||
73 | ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ | ||
74 | ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \ | ||
75 | (ADF_ARB_REG_SIZE * index), value) | ||
76 | |||
77 | #define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \ | ||
78 | ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \ | ||
79 | (ADF_ARB_REG_SIZE * index), value) | ||
80 | |||
81 | #define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \ | ||
82 | ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ | ||
83 | ADF_ARB_WRK_2_SER_MAP_OFFSET) + \ | ||
84 | (ADF_ARB_REG_SIZE * index), value) | ||
85 | |||
86 | #define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \ | ||
87 | ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ | ||
88 | ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value) | ||
89 | |||
90 | int adf_init_arb(struct adf_accel_dev *accel_dev) | ||
91 | { | ||
92 | void __iomem *csr = accel_dev->transport->banks[0].csr_addr; | ||
93 | uint32_t arb_cfg = 0x1U << 31 | 0x4 << 4 | 0x1; | ||
94 | uint32_t arb, i; | ||
95 | const uint32_t *thd_2_arb_cfg; | ||
96 | |||
97 | /* Service arb configured for 32-byte responses and | ||
98 | * ring flow control check enabled. */ | ||
99 | for (arb = 0; arb < ADF_ARB_NUM; arb++) | ||
100 | WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg); | ||
101 | |||
102 | /* Setup service weighting */ | ||
103 | for (arb = 0; arb < ADF_ARB_NUM; arb++) | ||
104 | for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) | ||
105 | WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF); | ||
106 | |||
107 | /* Setup ring response ordering */ | ||
108 | for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) | ||
109 | WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF); | ||
110 | |||
111 | /* Setup worker queue registers */ | ||
112 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | ||
113 | WRITE_CSR_ARB_WQCFG(csr, i, i); | ||
114 | |||
115 | /* Map worker threads to service arbiters */ | ||
116 | adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg); | ||
117 | |||
118 | if (!thd_2_arb_cfg) | ||
119 | return -EFAULT; | ||
120 | |||
121 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | ||
122 | WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i)); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring) | ||
128 | { | ||
129 | WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, | ||
130 | ring->bank->bank_number, | ||
131 | ring->bank->ring_mask & 0xFF); | ||
132 | } | ||
133 | |||
134 | void adf_exit_arb(struct adf_accel_dev *accel_dev) | ||
135 | { | ||
136 | void __iomem *csr; | ||
137 | unsigned int i; | ||
138 | |||
139 | if (!accel_dev->transport) | ||
140 | return; | ||
141 | |||
142 | csr = accel_dev->transport->banks[0].csr_addr; | ||
143 | |||
144 | /* Reset arbiter configuration */ | ||
145 | for (i = 0; i < ADF_ARB_NUM; i++) | ||
146 | WRITE_CSR_ARB_SARCONFIG(csr, i, 0); | ||
147 | |||
148 | /* Shutdown work queue */ | ||
149 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | ||
150 | WRITE_CSR_ARB_WQCFG(csr, i, 0); | ||
151 | |||
152 | /* Unmap worker threads from service arbiters */ | ||
153 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | ||
154 | WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0); | ||
155 | |||
156 | /* Disable arbitration on all rings */ | ||
157 | for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) | ||
158 | WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); | ||
159 | } | ||
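
The WRITE_CSR_ARB_* macros above compose three layers of offset arithmetic (arbiter block base, per-register group offset, per-arbiter and per-ring strides). A minimal standalone sketch of that computation, with the constants copied from this file and an invented helper name used purely for illustration:

/* Illustrative only: computes the CSR offset that
 * WRITE_CSR_ARB_WEIGHT(csr, arb, index, value) would touch,
 * using the same constants as adf_hw_arbiter.c above. */
#include <stdio.h>

#define ADF_ARB_OFFSET		0x30000
#define ADF_ARB_REG_SIZE	0x4
#define ADF_ARB_WTR_OFFSET	0x010
#define ADF_ARB_WTR_SIZE	0x20

static unsigned int arb_weight_offset(unsigned int arb, unsigned int index)
{
	return (ADF_ARB_OFFSET + ADF_ARB_WTR_OFFSET) +
	       (ADF_ARB_WTR_SIZE * arb) + (ADF_ARB_REG_SIZE * index);
}

int main(void)
{
	/* arbiter 1, ring 2 -> 0x30000 + 0x10 + 0x20 + 0x8 = 0x30038 */
	printf("0x%x\n", arb_weight_offset(1, 2));
	return 0;
}
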
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c new file mode 100644 index 000000000000..d4172dedf775 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c | |||
@@ -0,0 +1,266 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/init.h> | ||
49 | #include <linux/types.h> | ||
50 | #include <linux/pci.h> | ||
51 | #include <linux/slab.h> | ||
52 | #include <linux/errno.h> | ||
53 | #include <linux/interrupt.h> | ||
54 | #include <adf_accel_devices.h> | ||
55 | #include <adf_common_drv.h> | ||
56 | #include <adf_cfg.h> | ||
57 | #include <adf_cfg_strings.h> | ||
58 | #include <adf_cfg_common.h> | ||
59 | #include <adf_transport_access_macros.h> | ||
60 | #include <adf_transport_internal.h> | ||
61 | #include "adf_drv.h" | ||
62 | |||
63 | static int adf_enable_msix(struct adf_accel_dev *accel_dev) | ||
64 | { | ||
65 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; | ||
66 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
67 | uint32_t msix_num_entries = hw_data->num_banks + 1; | ||
68 | int i; | ||
69 | |||
70 | for (i = 0; i < msix_num_entries; i++) | ||
71 | pci_dev_info->msix_entries.entries[i].entry = i; | ||
72 | |||
73 | if (pci_enable_msix(pci_dev_info->pci_dev, | ||
74 | pci_dev_info->msix_entries.entries, | ||
75 | msix_num_entries)) { | ||
76 | pr_err("QAT: Failed to enable MSIX IRQ\n"); | ||
77 | return -EFAULT; | ||
78 | } | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static void adf_disable_msix(struct adf_accel_pci *pci_dev_info) | ||
83 | { | ||
84 | pci_disable_msix(pci_dev_info->pci_dev); | ||
85 | } | ||
86 | |||
87 | static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr) | ||
88 | { | ||
89 | struct adf_etr_bank_data *bank = bank_ptr; | ||
90 | |||
91 | WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); | ||
92 | tasklet_hi_schedule(&bank->resp_hanlder); | ||
93 | return IRQ_HANDLED; | ||
94 | } | ||
95 | |||
96 | static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) | ||
97 | { | ||
98 | struct adf_accel_dev *accel_dev = dev_ptr; | ||
99 | |||
100 | pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id); | ||
101 | return IRQ_HANDLED; | ||
102 | } | ||
103 | |||
104 | static int adf_request_irqs(struct adf_accel_dev *accel_dev) | ||
105 | { | ||
106 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; | ||
107 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
108 | struct msix_entry *msixe = pci_dev_info->msix_entries.entries; | ||
109 | struct adf_etr_data *etr_data = accel_dev->transport; | ||
110 | int ret, i; | ||
111 | char *name; | ||
112 | |||
113 | /* Request msix irq for all banks */ | ||
114 | for (i = 0; i < hw_data->num_banks; i++) { | ||
115 | struct adf_etr_bank_data *bank = &etr_data->banks[i]; | ||
116 | unsigned int cpu, cpus = num_online_cpus(); | ||
117 | |||
118 | name = *(pci_dev_info->msix_entries.names + i); | ||
119 | snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, | ||
120 | "qat%d-bundle%d", accel_dev->accel_id, i); | ||
121 | ret = request_irq(msixe[i].vector, | ||
122 | adf_msix_isr_bundle, 0, name, bank); | ||
123 | if (ret) { | ||
124 | pr_err("QAT: failed to enable irq %d for %s\n", | ||
125 | msixe[i].vector, name); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus; | ||
130 | irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu)); | ||
131 | } | ||
132 | |||
133 | /* Request msix irq for AE */ | ||
134 | name = *(pci_dev_info->msix_entries.names + i); | ||
135 | snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, | ||
136 | "qat%d-ae-cluster", accel_dev->accel_id); | ||
137 | ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev); | ||
138 | if (ret) { | ||
139 | pr_err("QAT: failed to enable irq %d for %s\n", | ||
140 | msixe[i].vector, name); | ||
141 | return ret; | ||
142 | } | ||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | static void adf_free_irqs(struct adf_accel_dev *accel_dev) | ||
147 | { | ||
148 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; | ||
149 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
150 | struct msix_entry *msixe = pci_dev_info->msix_entries.entries; | ||
151 | struct adf_etr_data *etr_data = accel_dev->transport; | ||
152 | int i; | ||
153 | |||
154 | for (i = 0; i < hw_data->num_banks; i++) { | ||
155 | irq_set_affinity_hint(msixe[i].vector, NULL); | ||
156 | free_irq(msixe[i].vector, &etr_data->banks[i]); | ||
157 | } | ||
158 | irq_set_affinity_hint(msixe[i].vector, NULL); | ||
159 | free_irq(msixe[i].vector, accel_dev); | ||
160 | } | ||
161 | |||
162 | static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) | ||
163 | { | ||
164 | int i; | ||
165 | char **names; | ||
166 | struct msix_entry *entries; | ||
167 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
168 | uint32_t msix_num_entries = hw_data->num_banks + 1; | ||
169 | |||
170 | entries = kzalloc_node(msix_num_entries * sizeof(*entries), | ||
171 | GFP_KERNEL, accel_dev->numa_node); | ||
172 | if (!entries) | ||
173 | return -ENOMEM; | ||
174 | |||
175 | names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL); | ||
176 | if (!names) { | ||
177 | kfree(entries); | ||
178 | return -ENOMEM; | ||
179 | } | ||
180 | for (i = 0; i < msix_num_entries; i++) { | ||
181 | *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); | ||
182 | if (!(*(names + i))) | ||
183 | goto err; | ||
184 | } | ||
185 | accel_dev->accel_pci_dev.msix_entries.entries = entries; | ||
186 | accel_dev->accel_pci_dev.msix_entries.names = names; | ||
187 | return 0; | ||
188 | err: | ||
189 | for (i = 0; i < msix_num_entries; i++) { | ||
190 | if (*(names + i)) | ||
191 | kfree(*(names + i)); | ||
192 | } | ||
193 | kfree(entries); | ||
194 | kfree(names); | ||
195 | return -ENOMEM; | ||
196 | } | ||
197 | |||
198 | static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) | ||
199 | { | ||
200 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
201 | uint32_t msix_num_entries = hw_data->num_banks + 1; | ||
202 | char **names = accel_dev->accel_pci_dev.msix_entries.names; | ||
203 | int i; | ||
204 | |||
205 | kfree(accel_dev->accel_pci_dev.msix_entries.entries); | ||
206 | for (i = 0; i < msix_num_entries; i++) { | ||
207 | if (*(names + i)) | ||
208 | kfree(*(names + i)); | ||
209 | } | ||
210 | kfree(names); | ||
211 | } | ||
212 | |||
213 | static int adf_setup_bh(struct adf_accel_dev *accel_dev) | ||
214 | { | ||
215 | struct adf_etr_data *priv_data = accel_dev->transport; | ||
216 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
217 | int i; | ||
218 | |||
219 | for (i = 0; i < hw_data->num_banks; i++) | ||
220 | tasklet_init(&priv_data->banks[i].resp_hanlder, | ||
221 | adf_response_handler, | ||
222 | (unsigned long)&priv_data->banks[i]); | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) | ||
227 | { | ||
228 | struct adf_etr_data *priv_data = accel_dev->transport; | ||
229 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
230 | int i; | ||
231 | |||
232 | for (i = 0; i < hw_data->num_banks; i++) { | ||
233 | tasklet_disable(&priv_data->banks[i].resp_hanlder); | ||
234 | tasklet_kill(&priv_data->banks[i].resp_hanlder); | ||
235 | } | ||
236 | } | ||
237 | |||
238 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev) | ||
239 | { | ||
240 | adf_free_irqs(accel_dev); | ||
241 | adf_cleanup_bh(accel_dev); | ||
242 | adf_disable_msix(&accel_dev->accel_pci_dev); | ||
243 | adf_isr_free_msix_entry_table(accel_dev); | ||
244 | } | ||
245 | |||
246 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) | ||
247 | { | ||
248 | int ret; | ||
249 | |||
250 | ret = adf_isr_alloc_msix_entry_table(accel_dev); | ||
251 | if (ret) | ||
252 | return ret; | ||
253 | if (adf_enable_msix(accel_dev)) | ||
254 | goto err_out; | ||
255 | |||
256 | if (adf_setup_bh(accel_dev)) | ||
257 | goto err_out; | ||
258 | |||
259 | if (adf_request_irqs(accel_dev)) | ||
260 | goto err_out; | ||
261 | |||
262 | return 0; | ||
263 | err_out: | ||
264 | adf_isr_resource_free(accel_dev); | ||
265 | return -EFAULT; | ||
266 | } | ||
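
adf_isr.c allocates num_banks + 1 MSI-X vectors: one per ring bank, plus a final vector for the accelerator engines, and it spreads the bundle handlers across CPUs round-robin. A hedged sketch of that layout and affinity math, using made-up example counts rather than values from real hardware:

/* Illustrative only: mirrors the affinity math in adf_request_irqs().
 * Vector i < num_banks serves bundle i; vector num_banks serves the AEs. */
#include <stdio.h>

int main(void)
{
	unsigned int num_banks = 8;	/* example bank count */
	unsigned int accel_id = 1;	/* second device in the system */
	unsigned int cpus = 4;		/* example online CPU count */
	unsigned int i;

	for (i = 0; i < num_banks; i++)
		printf("qat%u-bundle%u -> cpu %u\n", accel_id, i,
		       ((accel_id * num_banks) + i) % cpus);
	printf("qat%u-ae-cluster -> vector %u (no affinity hint)\n",
	       accel_id, num_banks);
	return 0;
}
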
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c new file mode 100644 index 000000000000..55b7a8e48bad --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <icp_qat_fw_init_admin.h> | ||
48 | #include <adf_accel_devices.h> | ||
49 | #include <adf_common_drv.h> | ||
50 | #include "adf_drv.h" | ||
51 | |||
52 | static struct service_hndl qat_admin; | ||
53 | |||
54 | static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) | ||
55 | { | ||
56 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
57 | struct icp_qat_fw_init_admin_req req; | ||
58 | struct icp_qat_fw_init_admin_resp resp; | ||
59 | int i; | ||
60 | |||
61 | memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); | ||
62 | req.init_admin_cmd_id = cmd; | ||
63 | for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { | ||
64 | memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); | ||
65 | if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || | ||
66 | resp.init_resp_hdr.status) | ||
67 | return -EFAULT; | ||
68 | } | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int qat_admin_start(struct adf_accel_dev *accel_dev) | ||
73 | { | ||
74 | return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); | ||
75 | } | ||
76 | |||
77 | static int qat_admin_event_handler(struct adf_accel_dev *accel_dev, | ||
78 | enum adf_event event) | ||
79 | { | ||
80 | int ret; | ||
81 | |||
82 | switch (event) { | ||
83 | case ADF_EVENT_START: | ||
84 | ret = qat_admin_start(accel_dev); | ||
85 | break; | ||
86 | case ADF_EVENT_STOP: | ||
87 | case ADF_EVENT_INIT: | ||
88 | case ADF_EVENT_SHUTDOWN: | ||
89 | default: | ||
90 | ret = 0; | ||
91 | } | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | int qat_admin_register(void) | ||
96 | { | ||
97 | memset(&qat_admin, 0, sizeof(struct service_hndl)); | ||
98 | qat_admin.event_hld = qat_admin_event_handler; | ||
99 | qat_admin.name = "qat_admin"; | ||
100 | qat_admin.admin = 1; | ||
101 | return adf_service_register(&qat_admin); | ||
102 | } | ||
103 | |||
104 | int qat_admin_unregister(void) | ||
105 | { | ||
106 | return adf_service_unregister(&qat_admin); | ||
107 | } | ||
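
qat_admin registers itself as a service handler and reacts only to ADF_EVENT_START. The dispatcher that invokes event_hld lives in the common adf code, outside this hunk; the sketch below uses invented stand-in types and a stand-in notifier purely to illustrate the callback contract qat_admin_event_handler is written against:

/* Illustrative only: the callback contract a registered service handler
 * relies on. These types and the dispatcher are invented stand-ins for
 * the common adf code, which is not part of this patch hunk. */
enum fake_event { FAKE_EVENT_INIT, FAKE_EVENT_START, FAKE_EVENT_STOP };

struct fake_service {
	int (*event_hld)(void *accel_dev, enum fake_event event);
	const char *name;
};

static int fake_notify_all(void *accel_dev, struct fake_service *svcs,
			   int num, enum fake_event event)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = svcs[i].event_hld(accel_dev, event);
		if (ret)
			return ret;	/* e.g. qat_admin_start() failed */
	}
	return 0;
}
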
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile new file mode 100644 index 000000000000..348dc3173afa --- /dev/null +++ b/drivers/crypto/qce/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o | ||
2 | qcrypto-objs := core.o \ | ||
3 | common.o \ | ||
4 | dma.o \ | ||
5 | sha.o \ | ||
6 | ablkcipher.o | ||
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c new file mode 100644 index 000000000000..ad592de475a4 --- /dev/null +++ b/drivers/crypto/qce/ablkcipher.c | |||
@@ -0,0 +1,431 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <crypto/aes.h> | ||
18 | #include <crypto/algapi.h> | ||
19 | #include <crypto/des.h> | ||
20 | |||
21 | #include "cipher.h" | ||
22 | |||
23 | static LIST_HEAD(ablkcipher_algs); | ||
24 | |||
25 | static void qce_ablkcipher_done(void *data) | ||
26 | { | ||
27 | struct crypto_async_request *async_req = data; | ||
28 | struct ablkcipher_request *req = ablkcipher_request_cast(async_req); | ||
29 | struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); | ||
30 | struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); | ||
31 | struct qce_device *qce = tmpl->qce; | ||
32 | enum dma_data_direction dir_src, dir_dst; | ||
33 | u32 status; | ||
34 | int error; | ||
35 | bool diff_dst; | ||
36 | |||
37 | diff_dst = (req->src != req->dst); | ||
38 | dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; | ||
39 | dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; | ||
40 | |||
41 | error = qce_dma_terminate_all(&qce->dma); | ||
42 | if (error) | ||
43 | dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n", | ||
44 | error); | ||
45 | |||
46 | if (diff_dst) | ||
47 | qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src, | ||
48 | rctx->src_chained); | ||
49 | qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, | ||
50 | rctx->dst_chained); | ||
51 | |||
52 | sg_free_table(&rctx->dst_tbl); | ||
53 | |||
54 | error = qce_check_status(qce, &status); | ||
55 | if (error < 0) | ||
56 | dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status); | ||
57 | |||
58 | qce->async_req_done(tmpl->qce, error); | ||
59 | } | ||
60 | |||
61 | static int | ||
62 | qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) | ||
63 | { | ||
64 | struct ablkcipher_request *req = ablkcipher_request_cast(async_req); | ||
65 | struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); | ||
66 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
67 | struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); | ||
68 | struct qce_device *qce = tmpl->qce; | ||
69 | enum dma_data_direction dir_src, dir_dst; | ||
70 | struct scatterlist *sg; | ||
71 | bool diff_dst; | ||
72 | gfp_t gfp; | ||
73 | int ret; | ||
74 | |||
75 | rctx->iv = req->info; | ||
76 | rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
77 | rctx->cryptlen = req->nbytes; | ||
78 | |||
79 | diff_dst = (req->src != req->dst); | ||
80 | dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; | ||
81 | dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; | ||
82 | |||
83 | rctx->src_nents = qce_countsg(req->src, req->nbytes, | ||
84 | &rctx->src_chained); | ||
85 | if (diff_dst) { | ||
86 | rctx->dst_nents = qce_countsg(req->dst, req->nbytes, | ||
87 | &rctx->dst_chained); | ||
88 | } else { | ||
89 | rctx->dst_nents = rctx->src_nents; | ||
90 | rctx->dst_chained = rctx->src_chained; | ||
91 | } | ||
92 | |||
93 | rctx->dst_nents += 1; | ||
94 | |||
95 | gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
96 | GFP_KERNEL : GFP_ATOMIC; | ||
97 | |||
98 | ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp); | ||
99 | if (ret) | ||
100 | return ret; | ||
101 | |||
102 | sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); | ||
103 | |||
104 | sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); | ||
105 | if (IS_ERR(sg)) { | ||
106 | ret = PTR_ERR(sg); | ||
107 | goto error_free; | ||
108 | } | ||
109 | |||
110 | sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); | ||
111 | if (IS_ERR(sg)) { | ||
112 | ret = PTR_ERR(sg); | ||
113 | goto error_free; | ||
114 | } | ||
115 | |||
116 | sg_mark_end(sg); | ||
117 | rctx->dst_sg = rctx->dst_tbl.sgl; | ||
118 | |||
119 | ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, | ||
120 | rctx->dst_chained); | ||
121 | if (ret < 0) | ||
122 | goto error_free; | ||
123 | |||
124 | if (diff_dst) { | ||
125 | ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src, | ||
126 | rctx->src_chained); | ||
127 | if (ret < 0) | ||
128 | goto error_unmap_dst; | ||
129 | rctx->src_sg = req->src; | ||
130 | } else { | ||
131 | rctx->src_sg = rctx->dst_sg; | ||
132 | } | ||
133 | |||
134 | ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents, | ||
135 | rctx->dst_sg, rctx->dst_nents, | ||
136 | qce_ablkcipher_done, async_req); | ||
137 | if (ret) | ||
138 | goto error_unmap_src; | ||
139 | |||
140 | qce_dma_issue_pending(&qce->dma); | ||
141 | |||
142 | ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); | ||
143 | if (ret) | ||
144 | goto error_terminate; | ||
145 | |||
146 | return 0; | ||
147 | |||
148 | error_terminate: | ||
149 | qce_dma_terminate_all(&qce->dma); | ||
150 | error_unmap_src: | ||
151 | if (diff_dst) | ||
152 | qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src, | ||
153 | rctx->src_chained); | ||
154 | error_unmap_dst: | ||
155 | qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, | ||
156 | rctx->dst_chained); | ||
157 | error_free: | ||
158 | sg_free_table(&rctx->dst_tbl); | ||
159 | return ret; | ||
160 | } | ||
161 | |||
162 | static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, | ||
163 | unsigned int keylen) | ||
164 | { | ||
165 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); | ||
166 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
167 | unsigned long flags = to_cipher_tmpl(tfm)->alg_flags; | ||
168 | int ret; | ||
169 | |||
170 | if (!key || !keylen) | ||
171 | return -EINVAL; | ||
172 | |||
173 | if (IS_AES(flags)) { | ||
174 | switch (keylen) { | ||
175 | case AES_KEYSIZE_128: | ||
176 | case AES_KEYSIZE_256: | ||
177 | break; | ||
178 | default: | ||
179 | goto fallback; | ||
180 | } | ||
181 | } else if (IS_DES(flags)) { | ||
182 | u32 tmp[DES_EXPKEY_WORDS]; | ||
183 | |||
184 | ret = des_ekey(tmp, key); | ||
185 | if (!ret && crypto_ablkcipher_get_flags(ablk) & | ||
186 | CRYPTO_TFM_REQ_WEAK_KEY) | ||
187 | goto weakkey; | ||
188 | } | ||
189 | |||
190 | ctx->enc_keylen = keylen; | ||
191 | memcpy(ctx->enc_key, key, keylen); | ||
192 | return 0; | ||
193 | fallback: | ||
194 | ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); | ||
195 | if (!ret) | ||
196 | ctx->enc_keylen = keylen; | ||
197 | return ret; | ||
198 | weakkey: | ||
199 | crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); | ||
200 | return -EINVAL; | ||
201 | } | ||
202 | |||
203 | static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) | ||
204 | { | ||
205 | struct crypto_tfm *tfm = | ||
206 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
207 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
208 | struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); | ||
209 | struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); | ||
210 | int ret; | ||
211 | |||
212 | rctx->flags = tmpl->alg_flags; | ||
213 | rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; | ||
214 | |||
215 | if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && | ||
216 | ctx->enc_keylen != AES_KEYSIZE_256) { | ||
217 | ablkcipher_request_set_tfm(req, ctx->fallback); | ||
218 | ret = encrypt ? crypto_ablkcipher_encrypt(req) : | ||
219 | crypto_ablkcipher_decrypt(req); | ||
220 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); | ||
225 | } | ||
226 | |||
227 | static int qce_ablkcipher_encrypt(struct ablkcipher_request *req) | ||
228 | { | ||
229 | return qce_ablkcipher_crypt(req, 1); | ||
230 | } | ||
231 | |||
232 | static int qce_ablkcipher_decrypt(struct ablkcipher_request *req) | ||
233 | { | ||
234 | return qce_ablkcipher_crypt(req, 0); | ||
235 | } | ||
236 | |||
237 | static int qce_ablkcipher_init(struct crypto_tfm *tfm) | ||
238 | { | ||
239 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
240 | |||
241 | memset(ctx, 0, sizeof(*ctx)); | ||
242 | tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); | ||
243 | |||
244 | ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), | ||
245 | CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
246 | CRYPTO_ALG_ASYNC | | ||
247 | CRYPTO_ALG_NEED_FALLBACK); | ||
248 | if (IS_ERR(ctx->fallback)) | ||
249 | return PTR_ERR(ctx->fallback); | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static void qce_ablkcipher_exit(struct crypto_tfm *tfm) | ||
255 | { | ||
256 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
257 | |||
258 | crypto_free_ablkcipher(ctx->fallback); | ||
259 | } | ||
260 | |||
261 | struct qce_ablkcipher_def { | ||
262 | unsigned long flags; | ||
263 | const char *name; | ||
264 | const char *drv_name; | ||
265 | unsigned int blocksize; | ||
266 | unsigned int ivsize; | ||
267 | unsigned int min_keysize; | ||
268 | unsigned int max_keysize; | ||
269 | }; | ||
270 | |||
271 | static const struct qce_ablkcipher_def ablkcipher_def[] = { | ||
272 | { | ||
273 | .flags = QCE_ALG_AES | QCE_MODE_ECB, | ||
274 | .name = "ecb(aes)", | ||
275 | .drv_name = "ecb-aes-qce", | ||
276 | .blocksize = AES_BLOCK_SIZE, | ||
277 | .ivsize = AES_BLOCK_SIZE, | ||
278 | .min_keysize = AES_MIN_KEY_SIZE, | ||
279 | .max_keysize = AES_MAX_KEY_SIZE, | ||
280 | }, | ||
281 | { | ||
282 | .flags = QCE_ALG_AES | QCE_MODE_CBC, | ||
283 | .name = "cbc(aes)", | ||
284 | .drv_name = "cbc-aes-qce", | ||
285 | .blocksize = AES_BLOCK_SIZE, | ||
286 | .ivsize = AES_BLOCK_SIZE, | ||
287 | .min_keysize = AES_MIN_KEY_SIZE, | ||
288 | .max_keysize = AES_MAX_KEY_SIZE, | ||
289 | }, | ||
290 | { | ||
291 | .flags = QCE_ALG_AES | QCE_MODE_CTR, | ||
292 | .name = "ctr(aes)", | ||
293 | .drv_name = "ctr-aes-qce", | ||
294 | .blocksize = AES_BLOCK_SIZE, | ||
295 | .ivsize = AES_BLOCK_SIZE, | ||
296 | .min_keysize = AES_MIN_KEY_SIZE, | ||
297 | .max_keysize = AES_MAX_KEY_SIZE, | ||
298 | }, | ||
299 | { | ||
300 | .flags = QCE_ALG_AES | QCE_MODE_XTS, | ||
301 | .name = "xts(aes)", | ||
302 | .drv_name = "xts-aes-qce", | ||
303 | .blocksize = AES_BLOCK_SIZE, | ||
304 | .ivsize = AES_BLOCK_SIZE, | ||
305 | .min_keysize = AES_MIN_KEY_SIZE, | ||
306 | .max_keysize = AES_MAX_KEY_SIZE, | ||
307 | }, | ||
308 | { | ||
309 | .flags = QCE_ALG_DES | QCE_MODE_ECB, | ||
310 | .name = "ecb(des)", | ||
311 | .drv_name = "ecb-des-qce", | ||
312 | .blocksize = DES_BLOCK_SIZE, | ||
313 | .ivsize = 0, | ||
314 | .min_keysize = DES_KEY_SIZE, | ||
315 | .max_keysize = DES_KEY_SIZE, | ||
316 | }, | ||
317 | { | ||
318 | .flags = QCE_ALG_DES | QCE_MODE_CBC, | ||
319 | .name = "cbc(des)", | ||
320 | .drv_name = "cbc-des-qce", | ||
321 | .blocksize = DES_BLOCK_SIZE, | ||
322 | .ivsize = DES_BLOCK_SIZE, | ||
323 | .min_keysize = DES_KEY_SIZE, | ||
324 | .max_keysize = DES_KEY_SIZE, | ||
325 | }, | ||
326 | { | ||
327 | .flags = QCE_ALG_3DES | QCE_MODE_ECB, | ||
328 | .name = "ecb(des3_ede)", | ||
329 | .drv_name = "ecb-3des-qce", | ||
330 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
331 | .ivsize = 0, | ||
332 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
333 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
334 | }, | ||
335 | { | ||
336 | .flags = QCE_ALG_3DES | QCE_MODE_CBC, | ||
337 | .name = "cbc(des3_ede)", | ||
338 | .drv_name = "cbc-3des-qce", | ||
339 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
340 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
341 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
342 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
343 | }, | ||
344 | }; | ||
345 | |||
346 | static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def, | ||
347 | struct qce_device *qce) | ||
348 | { | ||
349 | struct qce_alg_template *tmpl; | ||
350 | struct crypto_alg *alg; | ||
351 | int ret; | ||
352 | |||
353 | tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); | ||
354 | if (!tmpl) | ||
355 | return -ENOMEM; | ||
356 | |||
357 | alg = &tmpl->alg.crypto; | ||
358 | |||
359 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); | ||
360 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
361 | def->drv_name); | ||
362 | |||
363 | alg->cra_blocksize = def->blocksize; | ||
364 | alg->cra_ablkcipher.ivsize = def->ivsize; | ||
365 | alg->cra_ablkcipher.min_keysize = def->min_keysize; | ||
366 | alg->cra_ablkcipher.max_keysize = def->max_keysize; | ||
367 | alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey; | ||
368 | alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; | ||
369 | alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; | ||
370 | |||
371 | alg->cra_priority = 300; | ||
372 | alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | | ||
373 | CRYPTO_ALG_NEED_FALLBACK; | ||
374 | alg->cra_ctxsize = sizeof(struct qce_cipher_ctx); | ||
375 | alg->cra_alignmask = 0; | ||
376 | alg->cra_type = &crypto_ablkcipher_type; | ||
377 | alg->cra_module = THIS_MODULE; | ||
378 | alg->cra_init = qce_ablkcipher_init; | ||
379 | alg->cra_exit = qce_ablkcipher_exit; | ||
380 | INIT_LIST_HEAD(&alg->cra_list); | ||
381 | |||
382 | INIT_LIST_HEAD(&tmpl->entry); | ||
383 | tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER; | ||
384 | tmpl->alg_flags = def->flags; | ||
385 | tmpl->qce = qce; | ||
386 | |||
387 | ret = crypto_register_alg(alg); | ||
388 | if (ret) { | ||
389 | kfree(tmpl); | ||
390 | dev_err(qce->dev, "%s registration failed\n", alg->cra_name); | ||
391 | return ret; | ||
392 | } | ||
393 | |||
394 | list_add_tail(&tmpl->entry, &ablkcipher_algs); | ||
395 | dev_dbg(qce->dev, "%s is registered\n", alg->cra_name); | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static void qce_ablkcipher_unregister(struct qce_device *qce) | ||
400 | { | ||
401 | struct qce_alg_template *tmpl, *n; | ||
402 | |||
403 | list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) { | ||
404 | crypto_unregister_alg(&tmpl->alg.crypto); | ||
405 | list_del(&tmpl->entry); | ||
406 | kfree(tmpl); | ||
407 | } | ||
408 | } | ||
409 | |||
410 | static int qce_ablkcipher_register(struct qce_device *qce) | ||
411 | { | ||
412 | int ret, i; | ||
413 | |||
414 | for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) { | ||
415 | ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce); | ||
416 | if (ret) | ||
417 | goto err; | ||
418 | } | ||
419 | |||
420 | return 0; | ||
421 | err: | ||
422 | qce_ablkcipher_unregister(qce); | ||
423 | return ret; | ||
424 | } | ||
425 | |||
426 | const struct qce_algo_ops ablkcipher_ops = { | ||
427 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
428 | .register_algs = qce_ablkcipher_register, | ||
429 | .unregister_algs = qce_ablkcipher_unregister, | ||
430 | .async_req_handle = qce_ablkcipher_async_req_handle, | ||
431 | }; | ||
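
The setkey/crypt pair above splits AES work between the engine and a software fallback: 128- and 256-bit keys are programmed into hardware, while other lengths (notably 192-bit) are delegated to the fallback ablkcipher at both setkey and encrypt/decrypt time. A small sketch summarizing that policy; the helper name is illustrative, not a driver function:

/* Illustrative only: the key-size policy encoded in
 * qce_ablkcipher_setkey() and qce_ablkcipher_crypt() above. */
#include <stdbool.h>

/* AES_KEYSIZE_128 = 16, AES_KEYSIZE_192 = 24, AES_KEYSIZE_256 = 32 */
static bool qce_handles_aes_keylen(unsigned int keylen)
{
	return keylen == 16 || keylen == 32;
}

/*
 * keylen 16 or 32 -> programmed into the engine (ctx->enc_key)
 * keylen 24       -> routed to the software fallback ablkcipher
 */
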
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h new file mode 100644 index 000000000000..d5757cfcda2d --- /dev/null +++ b/drivers/crypto/qce/cipher.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _CIPHER_H_ | ||
15 | #define _CIPHER_H_ | ||
16 | |||
17 | #include "common.h" | ||
18 | #include "core.h" | ||
19 | |||
20 | #define QCE_MAX_KEY_SIZE 64 | ||
21 | |||
22 | struct qce_cipher_ctx { | ||
23 | u8 enc_key[QCE_MAX_KEY_SIZE]; | ||
24 | unsigned int enc_keylen; | ||
25 | struct crypto_ablkcipher *fallback; | ||
26 | }; | ||
27 | |||
28 | /** | ||
29 | * struct qce_cipher_reqctx - holds private cipher objects per request | ||
30 | * @flags: operation flags | ||
31 | * @iv: pointer to the IV | ||
32 | * @ivsize: IV size | ||
33 | * @src_nents: source entries | ||
34 | * @dst_nents: destination entries | ||
35 | * @src_chained: is source chained | ||
36 | * @dst_chained: is destination chained | ||
37 | * @result_sg: scatterlist used for result buffer | ||
38 | * @dst_tbl: destination sg table | ||
39 | * @dst_sg: destination sg pointer table beginning | ||
40 | * @src_tbl: source sg table | ||
41 | * @src_sg: source sg pointer table beginning | ||
42 | * @cryptlen: crypto length | ||
43 | */ | ||
44 | struct qce_cipher_reqctx { | ||
45 | unsigned long flags; | ||
46 | u8 *iv; | ||
47 | unsigned int ivsize; | ||
48 | int src_nents; | ||
49 | int dst_nents; | ||
50 | bool src_chained; | ||
51 | bool dst_chained; | ||
52 | struct scatterlist result_sg; | ||
53 | struct sg_table dst_tbl; | ||
54 | struct scatterlist *dst_sg; | ||
55 | struct sg_table src_tbl; | ||
56 | struct scatterlist *src_sg; | ||
57 | unsigned int cryptlen; | ||
58 | }; | ||
59 | |||
60 | static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm) | ||
61 | { | ||
62 | struct crypto_alg *alg = tfm->__crt_alg; | ||
63 | return container_of(alg, struct qce_alg_template, alg.crypto); | ||
64 | } | ||
65 | |||
66 | extern const struct qce_algo_ops ablkcipher_ops; | ||
67 | |||
68 | #endif /* _CIPHER_H_ */ | ||
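
to_cipher_tmpl() above is the standard container_of pattern: crypto_register_alg() hands the driver back the embedded crypto_alg, and the enclosing qce_alg_template is recovered from it. A self-contained sketch of the same pattern with generic names; the simplified container_of here is for illustration only, as the kernel's macro adds type checking:

/* Illustrative only: the container_of pattern used by to_cipher_tmpl(). */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer {
	int extra;
	struct inner member;	/* embedded, like alg.crypto in the template */
};

static struct outer *to_outer(struct inner *p)
{
	return container_of(p, struct outer, member);
}
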
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c new file mode 100644 index 000000000000..1fb5fde7fc03 --- /dev/null +++ b/drivers/crypto/qce/common.c | |||
@@ -0,0 +1,438 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/err.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <crypto/scatterwalk.h> | ||
18 | #include <crypto/sha.h> | ||
19 | |||
20 | #include "cipher.h" | ||
21 | #include "common.h" | ||
22 | #include "core.h" | ||
23 | #include "regs-v5.h" | ||
24 | #include "sha.h" | ||
25 | |||
26 | #define QCE_SECTOR_SIZE 512 | ||
27 | |||
28 | static inline u32 qce_read(struct qce_device *qce, u32 offset) | ||
29 | { | ||
30 | return readl(qce->base + offset); | ||
31 | } | ||
32 | |||
33 | static inline void qce_write(struct qce_device *qce, u32 offset, u32 val) | ||
34 | { | ||
35 | writel(val, qce->base + offset); | ||
36 | } | ||
37 | |||
38 | static inline void qce_write_array(struct qce_device *qce, u32 offset, | ||
39 | const u32 *val, unsigned int len) | ||
40 | { | ||
41 | int i; | ||
42 | |||
43 | for (i = 0; i < len; i++) | ||
44 | qce_write(qce, offset + i * sizeof(u32), val[i]); | ||
45 | } | ||
46 | |||
47 | static inline void | ||
48 | qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) | ||
49 | { | ||
50 | int i; | ||
51 | |||
52 | for (i = 0; i < len; i++) | ||
53 | qce_write(qce, offset + i * sizeof(u32), 0); | ||
54 | } | ||
55 | |||
56 | static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) | ||
57 | { | ||
58 | u32 cfg = 0; | ||
59 | |||
60 | if (IS_AES(flags)) { | ||
61 | if (aes_key_size == AES_KEYSIZE_128) | ||
62 | cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; | ||
63 | else if (aes_key_size == AES_KEYSIZE_256) | ||
64 | cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; | ||
65 | } | ||
66 | |||
67 | if (IS_AES(flags)) | ||
68 | cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; | ||
69 | else if (IS_DES(flags) || IS_3DES(flags)) | ||
70 | cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; | ||
71 | |||
72 | if (IS_DES(flags)) | ||
73 | cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; | ||
74 | |||
75 | if (IS_3DES(flags)) | ||
76 | cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; | ||
77 | |||
78 | switch (flags & QCE_MODE_MASK) { | ||
79 | case QCE_MODE_ECB: | ||
80 | cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; | ||
81 | break; | ||
82 | case QCE_MODE_CBC: | ||
83 | cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; | ||
84 | break; | ||
85 | case QCE_MODE_CTR: | ||
86 | cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; | ||
87 | break; | ||
88 | case QCE_MODE_XTS: | ||
89 | cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; | ||
90 | break; | ||
91 | case QCE_MODE_CCM: | ||
92 | cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; | ||
93 | cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; | ||
94 | break; | ||
95 | default: | ||
96 | return ~0; | ||
97 | } | ||
98 | |||
99 | return cfg; | ||
100 | } | ||
101 | |||
102 | static u32 qce_auth_cfg(unsigned long flags, u32 key_size) | ||
103 | { | ||
104 | u32 cfg = 0; | ||
105 | |||
106 | if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags))) | ||
107 | cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT; | ||
108 | else | ||
109 | cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT; | ||
110 | |||
111 | if (IS_CCM(flags) || IS_CMAC(flags)) { | ||
112 | if (key_size == AES_KEYSIZE_128) | ||
113 | cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT; | ||
114 | else if (key_size == AES_KEYSIZE_256) | ||
115 | cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT; | ||
116 | } | ||
117 | |||
118 | if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) | ||
119 | cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT; | ||
120 | else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) | ||
121 | cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT; | ||
122 | else if (IS_CMAC(flags)) | ||
123 | cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT; | ||
124 | |||
125 | if (IS_SHA1(flags) || IS_SHA256(flags)) | ||
126 | cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT; | ||
127 | else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) || | ||
128 | IS_CBC(flags) || IS_CTR(flags)) | ||
129 | cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT; | ||
130 | else if (IS_AES(flags) && IS_CCM(flags)) | ||
131 | cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT; | ||
132 | else if (IS_AES(flags) && IS_CMAC(flags)) | ||
133 | cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT; | ||
134 | |||
135 | if (IS_SHA(flags) || IS_SHA_HMAC(flags)) | ||
136 | cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT; | ||
137 | |||
138 | if (IS_CCM(flags)) | ||
139 | cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT; | ||
140 | |||
141 | if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) || | ||
142 | IS_CMAC(flags)) | ||
143 | cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT); | ||
144 | |||
145 | return cfg; | ||
146 | } | ||
147 | |||
148 | static u32 qce_config_reg(struct qce_device *qce, int little) | ||
149 | { | ||
150 | u32 beats = (qce->burst_size >> 3) - 1; | ||
151 | u32 pipe_pair = qce->pipe_pair_id; | ||
152 | u32 config; | ||
153 | |||
154 | config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; | ||
155 | config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | | ||
156 | BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); | ||
157 | config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; | ||
158 | config &= ~BIT(HIGH_SPD_EN_N_SHIFT); | ||
159 | |||
160 | if (little) | ||
161 | config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); | ||
162 | |||
163 | return config; | ||
164 | } | ||
165 | |||
166 | void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) | ||
167 | { | ||
168 | __be32 *d = dst; | ||
169 | const u8 *s = src; | ||
170 | unsigned int n; | ||
171 | |||
172 | n = len / sizeof(u32); | ||
173 | for (; n > 0; n--) { | ||
174 | *d = cpu_to_be32p((const __u32 *) s); | ||
175 | s += sizeof(__u32); | ||
176 | d++; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) | ||
181 | { | ||
182 | u8 swap[QCE_AES_IV_LENGTH]; | ||
183 | u32 i, j; | ||
184 | |||
185 | if (ivsize > QCE_AES_IV_LENGTH) | ||
186 | return; | ||
187 | |||
188 | memset(swap, 0, QCE_AES_IV_LENGTH); | ||
189 | |||
190 | for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; | ||
191 | i < QCE_AES_IV_LENGTH; i++, j--) | ||
192 | swap[i] = src[j]; | ||
193 | |||
194 | qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); | ||
195 | } | ||
196 | |||
197 | static void qce_xtskey(struct qce_device *qce, const u8 *enckey, | ||
198 | unsigned int enckeylen, unsigned int cryptlen) | ||
199 | { | ||
200 | u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; | ||
201 | unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); | ||
202 | unsigned int xtsdusize; | ||
203 | |||
204 | qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, | ||
205 | enckeylen / 2); | ||
206 | qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); | ||
207 | |||
208 | /* xts du size is 512B, capped at cryptlen */ | ||
209 | xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); | ||
210 | qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); | ||
211 | } | ||
212 | |||
213 | static void qce_setup_config(struct qce_device *qce) | ||
214 | { | ||
215 | u32 config; | ||
216 | |||
217 | /* program big endian byte order */ | ||
218 | config = qce_config_reg(qce, 0); | ||
219 | |||
220 | /* clear status */ | ||
221 | qce_write(qce, REG_STATUS, 0); | ||
222 | qce_write(qce, REG_CONFIG, config); | ||
223 | } | ||
224 | |||
225 | static inline void qce_crypto_go(struct qce_device *qce) | ||
226 | { | ||
227 | qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); | ||
228 | } | ||
229 | |||
230 | static int qce_setup_regs_ahash(struct crypto_async_request *async_req, | ||
231 | u32 totallen, u32 offset) | ||
232 | { | ||
233 | struct ahash_request *req = ahash_request_cast(async_req); | ||
234 | struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm); | ||
235 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
236 | struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); | ||
237 | struct qce_device *qce = tmpl->qce; | ||
238 | unsigned int digestsize = crypto_ahash_digestsize(ahash); | ||
239 | unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm); | ||
240 | __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0}; | ||
241 | __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0}; | ||
242 | u32 auth_cfg = 0, config; | ||
243 | unsigned int iv_words; | ||
244 | |||
245 | /* if not the last block, the size has to be a multiple of the block size */ | ||
246 | if (!rctx->last_blk && req->nbytes % blocksize) | ||
247 | return -EINVAL; | ||
248 | |||
249 | qce_setup_config(qce); | ||
250 | |||
251 | if (IS_CMAC(rctx->flags)) { | ||
252 | qce_write(qce, REG_AUTH_SEG_CFG, 0); | ||
253 | qce_write(qce, REG_ENCR_SEG_CFG, 0); | ||
254 | qce_write(qce, REG_ENCR_SEG_SIZE, 0); | ||
255 | qce_clear_array(qce, REG_AUTH_IV0, 16); | ||
256 | qce_clear_array(qce, REG_AUTH_KEY0, 16); | ||
257 | qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); | ||
258 | |||
259 | auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen); | ||
260 | } | ||
261 | |||
262 | if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) { | ||
263 | u32 authkey_words = rctx->authklen / sizeof(u32); | ||
264 | |||
265 | qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen); | ||
266 | qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey, | ||
267 | authkey_words); | ||
268 | } | ||
269 | |||
270 | if (IS_CMAC(rctx->flags)) | ||
271 | goto go_proc; | ||
272 | |||
273 | if (rctx->first_blk) | ||
274 | memcpy(auth, rctx->digest, digestsize); | ||
275 | else | ||
276 | qce_cpu_to_be32p_array(auth, rctx->digest, digestsize); | ||
277 | |||
278 | iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8; | ||
279 | qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words); | ||
280 | |||
281 | if (rctx->first_blk) | ||
282 | qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); | ||
283 | else | ||
284 | qce_write_array(qce, REG_AUTH_BYTECNT0, | ||
285 | (u32 *)rctx->byte_count, 2); | ||
286 | |||
287 | auth_cfg = qce_auth_cfg(rctx->flags, 0); | ||
288 | |||
289 | if (rctx->last_blk) | ||
290 | auth_cfg |= BIT(AUTH_LAST_SHIFT); | ||
291 | else | ||
292 | auth_cfg &= ~BIT(AUTH_LAST_SHIFT); | ||
293 | |||
294 | if (rctx->first_blk) | ||
295 | auth_cfg |= BIT(AUTH_FIRST_SHIFT); | ||
296 | else | ||
297 | auth_cfg &= ~BIT(AUTH_FIRST_SHIFT); | ||
298 | |||
299 | go_proc: | ||
300 | qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); | ||
301 | qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes); | ||
302 | qce_write(qce, REG_AUTH_SEG_START, 0); | ||
303 | qce_write(qce, REG_ENCR_SEG_CFG, 0); | ||
304 | qce_write(qce, REG_SEG_SIZE, req->nbytes); | ||
305 | |||
306 | /* switch to little endian byte order */ | ||
307 | config = qce_config_reg(qce, 1); | ||
308 | qce_write(qce, REG_CONFIG, config); | ||
309 | |||
310 | qce_crypto_go(qce); | ||
311 | |||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, | ||
316 | u32 totallen, u32 offset) | ||
317 | { | ||
318 | struct ablkcipher_request *req = ablkcipher_request_cast(async_req); | ||
319 | struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); | ||
320 | struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm); | ||
321 | struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); | ||
322 | struct qce_device *qce = tmpl->qce; | ||
323 | __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0}; | ||
324 | __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0}; | ||
325 | unsigned int enckey_words, enciv_words; | ||
326 | unsigned int keylen; | ||
327 | u32 encr_cfg = 0, auth_cfg = 0, config; | ||
328 | unsigned int ivsize = rctx->ivsize; | ||
329 | unsigned long flags = rctx->flags; | ||
330 | |||
331 | qce_setup_config(qce); | ||
332 | |||
333 | if (IS_XTS(flags)) | ||
334 | keylen = ctx->enc_keylen / 2; | ||
335 | else | ||
336 | keylen = ctx->enc_keylen; | ||
337 | |||
338 | qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen); | ||
339 | enckey_words = keylen / sizeof(u32); | ||
340 | |||
341 | qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); | ||
342 | |||
343 | encr_cfg = qce_encr_cfg(flags, keylen); | ||
344 | |||
345 | if (IS_DES(flags)) { | ||
346 | enciv_words = 2; | ||
347 | enckey_words = 2; | ||
348 | } else if (IS_3DES(flags)) { | ||
349 | enciv_words = 2; | ||
350 | enckey_words = 6; | ||
351 | } else if (IS_AES(flags)) { | ||
352 | if (IS_XTS(flags)) | ||
353 | qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen, | ||
354 | rctx->cryptlen); | ||
355 | enciv_words = 4; | ||
356 | } else { | ||
357 | return -EINVAL; | ||
358 | } | ||
359 | |||
360 | qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words); | ||
361 | |||
362 | if (!IS_ECB(flags)) { | ||
363 | if (IS_XTS(flags)) | ||
364 | qce_xts_swapiv(enciv, rctx->iv, ivsize); | ||
365 | else | ||
366 | qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize); | ||
367 | |||
368 | qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words); | ||
369 | } | ||
370 | |||
371 | if (IS_ENCRYPT(flags)) | ||
372 | encr_cfg |= BIT(ENCODE_SHIFT); | ||
373 | |||
374 | qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg); | ||
375 | qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen); | ||
376 | qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff); | ||
377 | |||
378 | if (IS_CTR(flags)) { | ||
379 | qce_write(qce, REG_CNTR_MASK, ~0); | ||
380 | qce_write(qce, REG_CNTR_MASK0, ~0); | ||
381 | qce_write(qce, REG_CNTR_MASK1, ~0); | ||
382 | qce_write(qce, REG_CNTR_MASK2, ~0); | ||
383 | } | ||
384 | |||
385 | qce_write(qce, REG_SEG_SIZE, totallen); | ||
386 | |||
387 | /* switch to little endian byte order */ | ||
388 | config = qce_config_reg(qce, 1); | ||
389 | qce_write(qce, REG_CONFIG, config); | ||
390 | |||
391 | qce_crypto_go(qce); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, | ||
397 | u32 offset) | ||
398 | { | ||
399 | switch (type) { | ||
400 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
401 | return qce_setup_regs_ablkcipher(async_req, totallen, offset); | ||
402 | case CRYPTO_ALG_TYPE_AHASH: | ||
403 | return qce_setup_regs_ahash(async_req, totallen, offset); | ||
404 | default: | ||
405 | return -EINVAL; | ||
406 | } | ||
407 | } | ||
408 | |||
409 | #define STATUS_ERRORS \ | ||
410 | (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT)) | ||
411 | |||
412 | int qce_check_status(struct qce_device *qce, u32 *status) | ||
413 | { | ||
414 | int ret = 0; | ||
415 | |||
416 | *status = qce_read(qce, REG_STATUS); | ||
417 | |||
418 | /* | ||
419 | * Don't use result dump status. The operation may not be complete. | ||
420 | * Instead, use the status we just read from the device. If we ever | ||
421 | * need the result_status from the result dump, it must be byte | ||
422 | * swapped first, since we set the device to little endian. | ||
423 | */ | ||
424 | if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT))) | ||
425 | ret = -ENXIO; | ||
426 | |||
427 | return ret; | ||
428 | } | ||
429 | |||
430 | void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step) | ||
431 | { | ||
432 | u32 val; | ||
433 | |||
434 | val = qce_read(qce, REG_VERSION); | ||
435 | *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT; | ||
436 | *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT; | ||
437 | *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT; | ||
438 | } | ||
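
qce_xts_swapiv() above reverses the IV bytes into the tail of a zero-filled 16-byte buffer before the usual be32 conversion. A worked example of just the byte shuffle, using a hypothetical 4-byte IV so the layout is easy to read (real AES-XTS IVs are 16 bytes):

/* Illustrative only: the byte shuffle performed by qce_xts_swapiv()
 * before the be32 conversion. */
#include <stdio.h>
#include <string.h>

#define IV_LEN 16

int main(void)
{
	unsigned char src[4] = { 0x01, 0x02, 0x03, 0x04 };
	unsigned char swap[IV_LEN];
	unsigned int ivsize = sizeof(src), i, j;

	memset(swap, 0, IV_LEN);
	for (i = IV_LEN - ivsize, j = ivsize - 1; i < IV_LEN; i++, j--)
		swap[i] = src[j];

	/* prints: 00 x12, then 04 03 02 01 (reversed IV, right-aligned) */
	for (i = 0; i < IV_LEN; i++)
		printf("%02x ", swap[i]);
	printf("\n");
	return 0;
}
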
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h new file mode 100644 index 000000000000..a4addd4f7d6c --- /dev/null +++ b/drivers/crypto/qce/common.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _COMMON_H_ | ||
15 | #define _COMMON_H_ | ||
16 | |||
17 | #include <linux/crypto.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <crypto/aes.h> | ||
20 | #include <crypto/hash.h> | ||
21 | |||
22 | /* key size in bytes */ | ||
23 | #define QCE_SHA_HMAC_KEY_SIZE 64 | ||
24 | #define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256 | ||
25 | |||
26 | /* IV length in bytes */ | ||
27 | #define QCE_AES_IV_LENGTH AES_BLOCK_SIZE | ||
28 | /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | ||
29 | #define QCE_MAX_IV_SIZE AES_BLOCK_SIZE | ||
30 | |||
31 | /* maximum nonce bytes */ | ||
32 | #define QCE_MAX_NONCE 16 | ||
33 | #define QCE_MAX_NONCE_WORDS (QCE_MAX_NONCE / sizeof(u32)) | ||
34 | |||
35 | /* burst size alignment requirement */ | ||
36 | #define QCE_MAX_ALIGN_SIZE 64 | ||
37 | |||
38 | /* cipher algorithms */ | ||
39 | #define QCE_ALG_DES BIT(0) | ||
40 | #define QCE_ALG_3DES BIT(1) | ||
41 | #define QCE_ALG_AES BIT(2) | ||
42 | |||
43 | /* hash and hmac algorithms */ | ||
44 | #define QCE_HASH_SHA1 BIT(3) | ||
45 | #define QCE_HASH_SHA256 BIT(4) | ||
46 | #define QCE_HASH_SHA1_HMAC BIT(5) | ||
47 | #define QCE_HASH_SHA256_HMAC BIT(6) | ||
48 | #define QCE_HASH_AES_CMAC BIT(7) | ||
49 | |||
50 | /* cipher modes */ | ||
51 | #define QCE_MODE_CBC BIT(8) | ||
52 | #define QCE_MODE_ECB BIT(9) | ||
53 | #define QCE_MODE_CTR BIT(10) | ||
54 | #define QCE_MODE_XTS BIT(11) | ||
55 | #define QCE_MODE_CCM BIT(12) | ||
56 | #define QCE_MODE_MASK GENMASK(12, 8) | ||
57 | |||
58 | /* cipher encryption/decryption operations */ | ||
59 | #define QCE_ENCRYPT BIT(13) | ||
60 | #define QCE_DECRYPT BIT(14) | ||
61 | |||
62 | #define IS_DES(flags) (flags & QCE_ALG_DES) | ||
63 | #define IS_3DES(flags) (flags & QCE_ALG_3DES) | ||
64 | #define IS_AES(flags) (flags & QCE_ALG_AES) | ||
65 | |||
66 | #define IS_SHA1(flags) (flags & QCE_HASH_SHA1) | ||
67 | #define IS_SHA256(flags) (flags & QCE_HASH_SHA256) | ||
68 | #define IS_SHA1_HMAC(flags) (flags & QCE_HASH_SHA1_HMAC) | ||
69 | #define IS_SHA256_HMAC(flags) (flags & QCE_HASH_SHA256_HMAC) | ||
70 | #define IS_CMAC(flags) (flags & QCE_HASH_AES_CMAC) | ||
71 | #define IS_SHA(flags) (IS_SHA1(flags) || IS_SHA256(flags)) | ||
72 | #define IS_SHA_HMAC(flags) \ | ||
73 | (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags)) | ||
74 | |||
75 | #define IS_CBC(mode) (mode & QCE_MODE_CBC) | ||
76 | #define IS_ECB(mode) (mode & QCE_MODE_ECB) | ||
77 | #define IS_CTR(mode) (mode & QCE_MODE_CTR) | ||
78 | #define IS_XTS(mode) (mode & QCE_MODE_XTS) | ||
79 | #define IS_CCM(mode) (mode & QCE_MODE_CCM) | ||
80 | |||
81 | #define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT) | ||
82 | #define IS_DECRYPT(dir) (dir & QCE_DECRYPT) | ||
83 | |||
84 | struct qce_alg_template { | ||
85 | struct list_head entry; | ||
86 | u32 crypto_alg_type; | ||
87 | unsigned long alg_flags; | ||
88 | const u32 *std_iv; | ||
89 | union { | ||
90 | struct crypto_alg crypto; | ||
91 | struct ahash_alg ahash; | ||
92 | } alg; | ||
93 | struct qce_device *qce; | ||
94 | }; | ||
95 | |||
96 | void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); | ||
97 | int qce_check_status(struct qce_device *qce, u32 *status); | ||
98 | void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step); | ||
99 | int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, | ||
100 | u32 offset); | ||
101 | |||
102 | #endif /* _COMMON_H_ */ | ||
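
Note that algorithm, mode, and direction all live in a single flags word, so one unsigned long fully describes a transform and the IS_*() predicates can be combined freely. A minimal sketch (the value is illustrative, not taken from the driver):

    unsigned long flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_ENCRYPT;

    /* IS_AES(flags), IS_CBC(flags) and IS_ENCRYPT(flags) are all
     * non-zero here, and masking with QCE_MODE_MASK (GENMASK(12, 8))
     * isolates exactly the mode bits:
     */
    WARN_ON((flags & QCE_MODE_MASK) != QCE_MODE_CBC);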
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c new file mode 100644 index 000000000000..33ae3545dc48 --- /dev/null +++ b/drivers/crypto/qce/core.c | |||
@@ -0,0 +1,286 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <crypto/algapi.h> | ||
21 | #include <crypto/internal/hash.h> | ||
22 | #include <crypto/sha.h> | ||
23 | |||
24 | #include "core.h" | ||
25 | #include "cipher.h" | ||
26 | #include "sha.h" | ||
27 | |||
28 | #define QCE_MAJOR_VERSION5 0x05 | ||
29 | #define QCE_QUEUE_LENGTH 1 | ||
30 | |||
31 | static const struct qce_algo_ops *qce_ops[] = { | ||
32 | &ablkcipher_ops, | ||
33 | &ahash_ops, | ||
34 | }; | ||
35 | |||
36 | static void qce_unregister_algs(struct qce_device *qce) | ||
37 | { | ||
38 | const struct qce_algo_ops *ops; | ||
39 | int i; | ||
40 | |||
41 | for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { | ||
42 | ops = qce_ops[i]; | ||
43 | ops->unregister_algs(qce); | ||
44 | } | ||
45 | } | ||
46 | |||
47 | static int qce_register_algs(struct qce_device *qce) | ||
48 | { | ||
49 | const struct qce_algo_ops *ops; | ||
50 | int i, ret = -ENODEV; | ||
51 | |||
52 | for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { | ||
53 | ops = qce_ops[i]; | ||
54 | ret = ops->register_algs(qce); | ||
55 | if (ret) | ||
56 | break; | ||
57 | } | ||
58 | |||
59 | return ret; | ||
60 | } | ||
61 | |||
62 | static int qce_handle_request(struct crypto_async_request *async_req) | ||
63 | { | ||
64 | int ret = -EINVAL, i; | ||
65 | const struct qce_algo_ops *ops; | ||
66 | u32 type = crypto_tfm_alg_type(async_req->tfm); | ||
67 | |||
68 | for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { | ||
69 | ops = qce_ops[i]; | ||
70 | if (type != ops->type) | ||
71 | continue; | ||
72 | ret = ops->async_req_handle(async_req); | ||
73 | break; | ||
74 | } | ||
75 | |||
76 | return ret; | ||
77 | } | ||
78 | |||
79 | static int qce_handle_queue(struct qce_device *qce, | ||
80 | struct crypto_async_request *req) | ||
81 | { | ||
82 | struct crypto_async_request *async_req, *backlog; | ||
83 | unsigned long flags; | ||
84 | int ret = 0, err; | ||
85 | |||
86 | spin_lock_irqsave(&qce->lock, flags); | ||
87 | |||
88 | if (req) | ||
89 | ret = crypto_enqueue_request(&qce->queue, req); | ||
90 | |||
91 | /* busy, do not dequeue request */ | ||
92 | if (qce->req) { | ||
93 | spin_unlock_irqrestore(&qce->lock, flags); | ||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | backlog = crypto_get_backlog(&qce->queue); | ||
98 | async_req = crypto_dequeue_request(&qce->queue); | ||
99 | if (async_req) | ||
100 | qce->req = async_req; | ||
101 | |||
102 | spin_unlock_irqrestore(&qce->lock, flags); | ||
103 | |||
104 | if (!async_req) | ||
105 | return ret; | ||
106 | |||
107 | if (backlog) { | ||
108 | spin_lock_bh(&qce->lock); | ||
109 | backlog->complete(backlog, -EINPROGRESS); | ||
110 | spin_unlock_bh(&qce->lock); | ||
111 | } | ||
112 | |||
113 | err = qce_handle_request(async_req); | ||
114 | if (err) { | ||
115 | qce->result = err; | ||
116 | tasklet_schedule(&qce->done_tasklet); | ||
117 | } | ||
118 | |||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | static void qce_tasklet_req_done(unsigned long data) | ||
123 | { | ||
124 | struct qce_device *qce = (struct qce_device *)data; | ||
125 | struct crypto_async_request *req; | ||
126 | unsigned long flags; | ||
127 | |||
128 | spin_lock_irqsave(&qce->lock, flags); | ||
129 | req = qce->req; | ||
130 | qce->req = NULL; | ||
131 | spin_unlock_irqrestore(&qce->lock, flags); | ||
132 | |||
133 | if (req) | ||
134 | req->complete(req, qce->result); | ||
135 | |||
136 | qce_handle_queue(qce, NULL); | ||
137 | } | ||
138 | |||
139 | static int qce_async_request_enqueue(struct qce_device *qce, | ||
140 | struct crypto_async_request *req) | ||
141 | { | ||
142 | return qce_handle_queue(qce, req); | ||
143 | } | ||
144 | |||
145 | static void qce_async_request_done(struct qce_device *qce, int ret) | ||
146 | { | ||
147 | qce->result = ret; | ||
148 | tasklet_schedule(&qce->done_tasklet); | ||
149 | } | ||
150 | |||
151 | static int qce_check_version(struct qce_device *qce) | ||
152 | { | ||
153 | u32 major, minor, step; | ||
154 | |||
155 | qce_get_version(qce, &major, &minor, &step); | ||
156 | |||
157 | /* | ||
158 | * The driver does not support v5 with minor revision 0, because | ||
159 | * that hardware has special alignment requirements. | ||
160 | */ | ||
161 | if (major != QCE_MAJOR_VERSION5 || minor == 0) | ||
162 | return -ENODEV; | ||
163 | |||
164 | qce->burst_size = QCE_BAM_BURST_SIZE; | ||
165 | qce->pipe_pair_id = 1; | ||
166 | |||
167 | dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n", | ||
168 | major, minor, step); | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int qce_crypto_probe(struct platform_device *pdev) | ||
174 | { | ||
175 | struct device *dev = &pdev->dev; | ||
176 | struct qce_device *qce; | ||
177 | struct resource *res; | ||
178 | int ret; | ||
179 | |||
180 | qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL); | ||
181 | if (!qce) | ||
182 | return -ENOMEM; | ||
183 | |||
184 | qce->dev = dev; | ||
185 | platform_set_drvdata(pdev, qce); | ||
186 | |||
187 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
188 | qce->base = devm_ioremap_resource(&pdev->dev, res); | ||
189 | if (IS_ERR(qce->base)) | ||
190 | return PTR_ERR(qce->base); | ||
191 | |||
192 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
193 | if (ret < 0) | ||
194 | return ret; | ||
195 | |||
196 | qce->core = devm_clk_get(qce->dev, "core"); | ||
197 | if (IS_ERR(qce->core)) | ||
198 | return PTR_ERR(qce->core); | ||
199 | |||
200 | qce->iface = devm_clk_get(qce->dev, "iface"); | ||
201 | if (IS_ERR(qce->iface)) | ||
202 | return PTR_ERR(qce->iface); | ||
203 | |||
204 | qce->bus = devm_clk_get(qce->dev, "bus"); | ||
205 | if (IS_ERR(qce->bus)) | ||
206 | return PTR_ERR(qce->bus); | ||
207 | |||
208 | ret = clk_prepare_enable(qce->core); | ||
209 | if (ret) | ||
210 | return ret; | ||
211 | |||
212 | ret = clk_prepare_enable(qce->iface); | ||
213 | if (ret) | ||
214 | goto err_clks_core; | ||
215 | |||
216 | ret = clk_prepare_enable(qce->bus); | ||
217 | if (ret) | ||
218 | goto err_clks_iface; | ||
219 | |||
220 | ret = qce_dma_request(qce->dev, &qce->dma); | ||
221 | if (ret) | ||
222 | goto err_clks; | ||
223 | |||
224 | ret = qce_check_version(qce); | ||
225 | if (ret) | ||
226 | goto err_clks; | ||
227 | |||
228 | spin_lock_init(&qce->lock); | ||
229 | tasklet_init(&qce->done_tasklet, qce_tasklet_req_done, | ||
230 | (unsigned long)qce); | ||
231 | crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH); | ||
232 | |||
233 | qce->async_req_enqueue = qce_async_request_enqueue; | ||
234 | qce->async_req_done = qce_async_request_done; | ||
235 | |||
236 | ret = qce_register_algs(qce); | ||
237 | if (ret) | ||
238 | goto err_dma; | ||
239 | |||
240 | return 0; | ||
241 | |||
242 | err_dma: | ||
243 | qce_dma_release(&qce->dma); | ||
244 | err_clks: | ||
245 | clk_disable_unprepare(qce->bus); | ||
246 | err_clks_iface: | ||
247 | clk_disable_unprepare(qce->iface); | ||
248 | err_clks_core: | ||
249 | clk_disable_unprepare(qce->core); | ||
250 | return ret; | ||
251 | } | ||
252 | |||
253 | static int qce_crypto_remove(struct platform_device *pdev) | ||
254 | { | ||
255 | struct qce_device *qce = platform_get_drvdata(pdev); | ||
256 | |||
257 | tasklet_kill(&qce->done_tasklet); | ||
258 | qce_unregister_algs(qce); | ||
259 | qce_dma_release(&qce->dma); | ||
260 | clk_disable_unprepare(qce->bus); | ||
261 | clk_disable_unprepare(qce->iface); | ||
262 | clk_disable_unprepare(qce->core); | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static const struct of_device_id qce_crypto_of_match[] = { | ||
267 | { .compatible = "qcom,crypto-v5.1", }, | ||
268 | {} | ||
269 | }; | ||
270 | MODULE_DEVICE_TABLE(of, qce_crypto_of_match); | ||
271 | |||
272 | static struct platform_driver qce_crypto_driver = { | ||
273 | .probe = qce_crypto_probe, | ||
274 | .remove = qce_crypto_remove, | ||
275 | .driver = { | ||
276 | .owner = THIS_MODULE, | ||
277 | .name = KBUILD_MODNAME, | ||
278 | .of_match_table = qce_crypto_of_match, | ||
279 | }, | ||
280 | }; | ||
281 | module_platform_driver(qce_crypto_driver); | ||
282 | |||
283 | MODULE_LICENSE("GPL v2"); | ||
284 | MODULE_DESCRIPTION("Qualcomm crypto engine driver"); | ||
285 | MODULE_ALIAS("platform:" KBUILD_MODNAME); | ||
286 | MODULE_AUTHOR("The Linux Foundation"); | ||
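
With QCE_QUEUE_LENGTH of 1 and qce->req doubling as the busy flag, the core strictly serializes requests: one request runs in hardware while at most one more sits queued (plus any backlog). The sketch below shows, under assumed function names (the example_* helpers are hypothetical), how an algorithm implementation is expected to drive the two callbacks installed in probe:

    /* Hypothetical helpers, not part of this patch. */
    static int example_enqueue(struct qce_alg_template *tmpl,
    			       struct crypto_async_request *req)
    {
    	/* Queues the request; qce_handle_queue() starts it when idle. */
    	return tmpl->qce->async_req_enqueue(tmpl->qce, req);
    }

    static void example_done(struct qce_alg_template *tmpl, int error)
    {
    	/* Schedules qce_tasklet_req_done(), which completes the active
    	 * request and dequeues the next one. */
    	tmpl->qce->async_req_done(tmpl->qce, error);
    }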
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h new file mode 100644 index 000000000000..549965d4d91f --- /dev/null +++ b/drivers/crypto/qce/core.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _CORE_H_ | ||
15 | #define _CORE_H_ | ||
16 | |||
17 | #include "dma.h" | ||
18 | |||
19 | /** | ||
20 | * struct qce_device - crypto engine device structure | ||
21 | * @queue: crypto request queue | ||
22 | * @lock: spinlock protecting @queue and @req | ||
23 | * @done_tasklet: done tasklet object | ||
24 | * @req: current active request | ||
25 | * @result: result of current transform | ||
26 | * @base: virtual IO base | ||
27 | * @dev: pointer to device structure | ||
28 | * @core: core device clock | ||
29 | * @iface: interface clock | ||
30 | * @bus: bus clock | ||
31 | * @dma: pointer to dma data | ||
32 | * @burst_size: the crypto burst size | ||
33 | * @pipe_pair_id: which pipe pair id the device is using | ||
34 | * @async_req_enqueue: invoked by every algorithm to enqueue a request | ||
35 | * @async_req_done: invoked by every algorithm to finish its request | ||
36 | */ | ||
37 | struct qce_device { | ||
38 | struct crypto_queue queue; | ||
39 | spinlock_t lock; | ||
40 | struct tasklet_struct done_tasklet; | ||
41 | struct crypto_async_request *req; | ||
42 | int result; | ||
43 | void __iomem *base; | ||
44 | struct device *dev; | ||
45 | struct clk *core, *iface, *bus; | ||
46 | struct qce_dma_data dma; | ||
47 | int burst_size; | ||
48 | unsigned int pipe_pair_id; | ||
49 | int (*async_req_enqueue)(struct qce_device *qce, | ||
50 | struct crypto_async_request *req); | ||
51 | void (*async_req_done)(struct qce_device *qce, int ret); | ||
52 | }; | ||
53 | |||
54 | /** | ||
55 | * struct qce_algo_ops - algorithm operations per crypto type | ||
56 | * @type: should be CRYPTO_ALG_TYPE_XXX | ||
57 | * @register_algs: invoked by core to register the algorithms | ||
58 | * @unregister_algs: invoked by core to unregister the algorithms | ||
59 | * @async_req_handle: invoked by core to handle enqueued request | ||
60 | */ | ||
61 | struct qce_algo_ops { | ||
62 | u32 type; | ||
63 | int (*register_algs)(struct qce_device *qce); | ||
64 | void (*unregister_algs)(struct qce_device *qce); | ||
65 | int (*async_req_handle)(struct crypto_async_request *async_req); | ||
66 | }; | ||
67 | |||
68 | #endif /* _CORE_H_ */ | ||
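
Adding support for another crypto type amounts to providing one more qce_algo_ops instance and listing it in the qce_ops[] array in core.c. A hypothetical skeleton (all example_* symbols are assumed, not part of the patch; compare the real ahash_ops at the end of sha.c):

    static int example_register(struct qce_device *qce);
    static void example_unregister(struct qce_device *qce);
    static int example_req_handle(struct crypto_async_request *async_req);

    static const struct qce_algo_ops example_ops = {
    	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
    	.register_algs = example_register,
    	.unregister_algs = example_unregister,
    	.async_req_handle = example_req_handle,
    };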
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c new file mode 100644 index 000000000000..0fb21e13f247 --- /dev/null +++ b/drivers/crypto/qce/dma.c | |||
@@ -0,0 +1,186 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/dmaengine.h> | ||
15 | #include <crypto/scatterwalk.h> | ||
16 | |||
17 | #include "dma.h" | ||
18 | |||
19 | int qce_dma_request(struct device *dev, struct qce_dma_data *dma) | ||
20 | { | ||
21 | int ret; | ||
22 | |||
23 | dma->txchan = dma_request_slave_channel_reason(dev, "tx"); | ||
24 | if (IS_ERR(dma->txchan)) | ||
25 | return PTR_ERR(dma->txchan); | ||
26 | |||
27 | dma->rxchan = dma_request_slave_channel_reason(dev, "rx"); | ||
28 | if (IS_ERR(dma->rxchan)) { | ||
29 | ret = PTR_ERR(dma->rxchan); | ||
30 | goto error_rx; | ||
31 | } | ||
32 | |||
33 | dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ, | ||
34 | GFP_KERNEL); | ||
35 | if (!dma->result_buf) { | ||
36 | ret = -ENOMEM; | ||
37 | goto error_nomem; | ||
38 | } | ||
39 | |||
40 | dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ; | ||
41 | |||
42 | return 0; | ||
43 | error_nomem: | ||
44 | dma_release_channel(dma->rxchan); | ||
45 | error_rx: | ||
46 | dma_release_channel(dma->txchan); | ||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | void qce_dma_release(struct qce_dma_data *dma) | ||
51 | { | ||
52 | dma_release_channel(dma->txchan); | ||
53 | dma_release_channel(dma->rxchan); | ||
54 | kfree(dma->result_buf); | ||
55 | } | ||
56 | |||
57 | int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, | ||
58 | enum dma_data_direction dir, bool chained) | ||
59 | { | ||
60 | int err; | ||
61 | |||
62 | if (chained) { | ||
63 | while (sg) { | ||
64 | err = dma_map_sg(dev, sg, 1, dir); | ||
65 | if (!err) | ||
66 | return -EFAULT; | ||
67 | sg = scatterwalk_sg_next(sg); | ||
68 | } | ||
69 | } else { | ||
70 | err = dma_map_sg(dev, sg, nents, dir); | ||
71 | if (!err) | ||
72 | return -EFAULT; | ||
73 | } | ||
74 | |||
75 | return nents; | ||
76 | } | ||
77 | |||
78 | void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, | ||
79 | enum dma_data_direction dir, bool chained) | ||
80 | { | ||
81 | if (chained) | ||
82 | while (sg) { | ||
83 | dma_unmap_sg(dev, sg, 1, dir); | ||
84 | sg = scatterwalk_sg_next(sg); | ||
85 | } | ||
86 | else | ||
87 | dma_unmap_sg(dev, sg, nents, dir); | ||
88 | } | ||
89 | |||
90 | int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained) | ||
91 | { | ||
92 | struct scatterlist *sg = sglist; | ||
93 | int nents = 0; | ||
94 | |||
95 | if (chained) | ||
96 | *chained = false; | ||
97 | |||
98 | while (nbytes > 0 && sg) { | ||
99 | nents++; | ||
100 | nbytes -= sg->length; | ||
101 | if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained) | ||
102 | *chained = true; | ||
103 | sg = scatterwalk_sg_next(sg); | ||
104 | } | ||
105 | |||
106 | return nents; | ||
107 | } | ||
108 | |||
109 | struct scatterlist * | ||
110 | qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) | ||
111 | { | ||
112 | struct scatterlist *sg = sgt->sgl, *sg_last = NULL; | ||
113 | |||
114 | while (sg) { | ||
115 | if (!sg_page(sg)) | ||
116 | break; | ||
117 | sg = sg_next(sg); | ||
118 | } | ||
119 | |||
120 | if (!sg) | ||
121 | return ERR_PTR(-EINVAL); | ||
122 | |||
123 | while (new_sgl && sg) { | ||
124 | sg_set_page(sg, sg_page(new_sgl), new_sgl->length, | ||
125 | new_sgl->offset); | ||
126 | sg_last = sg; | ||
127 | sg = sg_next(sg); | ||
128 | new_sgl = sg_next(new_sgl); | ||
129 | } | ||
130 | |||
131 | return sg_last; | ||
132 | } | ||
133 | |||
134 | static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg, | ||
135 | int nents, unsigned long flags, | ||
136 | enum dma_transfer_direction dir, | ||
137 | dma_async_tx_callback cb, void *cb_param) | ||
138 | { | ||
139 | struct dma_async_tx_descriptor *desc; | ||
140 | dma_cookie_t cookie; | ||
141 | |||
142 | if (!sg || !nents) | ||
143 | return -EINVAL; | ||
144 | |||
145 | desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags); | ||
146 | if (!desc) | ||
147 | return -EINVAL; | ||
148 | |||
149 | desc->callback = cb; | ||
150 | desc->callback_param = cb_param; | ||
151 | cookie = dmaengine_submit(desc); | ||
152 | |||
153 | return dma_submit_error(cookie); | ||
154 | } | ||
155 | |||
156 | int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg, | ||
157 | int rx_nents, struct scatterlist *tx_sg, int tx_nents, | ||
158 | dma_async_tx_callback cb, void *cb_param) | ||
159 | { | ||
160 | struct dma_chan *rxchan = dma->rxchan; | ||
161 | struct dma_chan *txchan = dma->txchan; | ||
162 | unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; | ||
163 | int ret; | ||
164 | |||
165 | ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV, | ||
166 | NULL, NULL); | ||
167 | if (ret) | ||
168 | return ret; | ||
169 | |||
170 | return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM, | ||
171 | cb, cb_param); | ||
172 | } | ||
173 | |||
174 | void qce_dma_issue_pending(struct qce_dma_data *dma) | ||
175 | { | ||
176 | dma_async_issue_pending(dma->rxchan); | ||
177 | dma_async_issue_pending(dma->txchan); | ||
178 | } | ||
179 | |||
180 | int qce_dma_terminate_all(struct qce_dma_data *dma) | ||
181 | { | ||
182 | int ret; | ||
183 | |||
184 | ret = dmaengine_terminate_all(dma->rxchan); | ||
185 | return ret ?: dmaengine_terminate_all(dma->txchan); | ||
186 | } | ||
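
Two naming conventions meet in qce_dma_prep_sgs(): the "rx" channel is prepared DMA_MEM_TO_DEV (it feeds data into the crypto engine) while the "tx" channel is DMA_DEV_TO_MEM, carrying results back and firing the completion callback; the directions are named from the BAM's point of view. A condensed sketch of the sequence every request follows (variable names assumed; compare qce_ahash_async_req_handle() in sha.c):

    ret = qce_dma_prep_sgs(&qce->dma, rx_sg, rx_nents,
    		           tx_sg, tx_nents, done_cb, async_req);
    if (ret)
    	return ret;

    qce_dma_issue_pending(&qce->dma);

    ret = qce_start(async_req, type, totallen, offset);
    if (ret)
    	qce_dma_terminate_all(&qce->dma);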
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h new file mode 100644 index 000000000000..805e378d59e9 --- /dev/null +++ b/drivers/crypto/qce/dma.h | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _DMA_H_ | ||
15 | #define _DMA_H_ | ||
16 | |||
17 | /* maximum data transfer block size between BAM and CE */ | ||
18 | #define QCE_BAM_BURST_SIZE 64 | ||
19 | |||
20 | #define QCE_AUTHIV_REGS_CNT 16 | ||
21 | #define QCE_AUTH_BYTECOUNT_REGS_CNT 4 | ||
22 | #define QCE_CNTRIV_REGS_CNT 4 | ||
23 | |||
24 | struct qce_result_dump { | ||
25 | u32 auth_iv[QCE_AUTHIV_REGS_CNT]; | ||
26 | u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT]; | ||
27 | u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT]; | ||
28 | u32 status; | ||
29 | u32 status2; | ||
30 | }; | ||
31 | |||
32 | #define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE) | ||
33 | #define QCE_RESULT_BUF_SZ \ | ||
34 | ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE) | ||
35 | |||
36 | struct qce_dma_data { | ||
37 | struct dma_chan *txchan; | ||
38 | struct dma_chan *rxchan; | ||
39 | struct qce_result_dump *result_buf; | ||
40 | void *ignore_buf; | ||
41 | }; | ||
42 | |||
43 | int qce_dma_request(struct device *dev, struct qce_dma_data *dma); | ||
44 | void qce_dma_release(struct qce_dma_data *dma); | ||
45 | int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in, | ||
46 | int in_ents, struct scatterlist *sg_out, int out_ents, | ||
47 | dma_async_tx_callback cb, void *cb_param); | ||
48 | void qce_dma_issue_pending(struct qce_dma_data *dma); | ||
49 | int qce_dma_terminate_all(struct qce_dma_data *dma); | ||
50 | int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained); | ||
51 | void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, | ||
52 | enum dma_data_direction dir, bool chained); | ||
53 | int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, | ||
54 | enum dma_data_direction dir, bool chained); | ||
55 | struct scatterlist * | ||
56 | qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); | ||
57 | |||
58 | #endif /* _DMA_H_ */ | ||
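
The buffer sizes follow directly from the definitions above (struct qce_result_dump is all u32s, so there is no padding):

    /* sizeof(struct qce_result_dump) = 16*4 + 4*4 + 4*4 + 4 + 4 = 104
     * QCE_RESULT_BUF_SZ = ALIGN(104, 64) = 128 bytes
     * QCE_IGNORE_BUF_SZ = 2 * 64         = 128 bytes
     *
     * qce_dma_request() therefore kmallocs 256 bytes in one go and
     * points ignore_buf at the second half of the allocation.
     */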
diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h new file mode 100644 index 000000000000..f0e19e35664a --- /dev/null +++ b/drivers/crypto/qce/regs-v5.h | |||
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _REGS_V5_H_ | ||
15 | #define _REGS_V5_H_ | ||
16 | |||
17 | #include <linux/bitops.h> | ||
18 | |||
19 | #define REG_VERSION 0x000 | ||
20 | #define REG_STATUS 0x100 | ||
21 | #define REG_STATUS2 0x104 | ||
22 | #define REG_ENGINES_AVAIL 0x108 | ||
23 | #define REG_FIFO_SIZES 0x10c | ||
24 | #define REG_SEG_SIZE 0x110 | ||
25 | #define REG_GOPROC 0x120 | ||
26 | #define REG_ENCR_SEG_CFG 0x200 | ||
27 | #define REG_ENCR_SEG_SIZE 0x204 | ||
28 | #define REG_ENCR_SEG_START 0x208 | ||
29 | #define REG_CNTR0_IV0 0x20c | ||
30 | #define REG_CNTR1_IV1 0x210 | ||
31 | #define REG_CNTR2_IV2 0x214 | ||
32 | #define REG_CNTR3_IV3 0x218 | ||
33 | #define REG_CNTR_MASK 0x21C | ||
34 | #define REG_ENCR_CCM_INT_CNTR0 0x220 | ||
35 | #define REG_ENCR_CCM_INT_CNTR1 0x224 | ||
36 | #define REG_ENCR_CCM_INT_CNTR2 0x228 | ||
37 | #define REG_ENCR_CCM_INT_CNTR3 0x22c | ||
38 | #define REG_ENCR_XTS_DU_SIZE 0x230 | ||
39 | #define REG_CNTR_MASK2 0x234 | ||
40 | #define REG_CNTR_MASK1 0x238 | ||
41 | #define REG_CNTR_MASK0 0x23c | ||
42 | #define REG_AUTH_SEG_CFG 0x300 | ||
43 | #define REG_AUTH_SEG_SIZE 0x304 | ||
44 | #define REG_AUTH_SEG_START 0x308 | ||
45 | #define REG_AUTH_IV0 0x310 | ||
46 | #define REG_AUTH_IV1 0x314 | ||
47 | #define REG_AUTH_IV2 0x318 | ||
48 | #define REG_AUTH_IV3 0x31c | ||
49 | #define REG_AUTH_IV4 0x320 | ||
50 | #define REG_AUTH_IV5 0x324 | ||
51 | #define REG_AUTH_IV6 0x328 | ||
52 | #define REG_AUTH_IV7 0x32c | ||
53 | #define REG_AUTH_IV8 0x330 | ||
54 | #define REG_AUTH_IV9 0x334 | ||
55 | #define REG_AUTH_IV10 0x338 | ||
56 | #define REG_AUTH_IV11 0x33c | ||
57 | #define REG_AUTH_IV12 0x340 | ||
58 | #define REG_AUTH_IV13 0x344 | ||
59 | #define REG_AUTH_IV14 0x348 | ||
60 | #define REG_AUTH_IV15 0x34c | ||
61 | #define REG_AUTH_INFO_NONCE0 0x350 | ||
62 | #define REG_AUTH_INFO_NONCE1 0x354 | ||
63 | #define REG_AUTH_INFO_NONCE2 0x358 | ||
64 | #define REG_AUTH_INFO_NONCE3 0x35c | ||
65 | #define REG_AUTH_BYTECNT0 0x390 | ||
66 | #define REG_AUTH_BYTECNT1 0x394 | ||
67 | #define REG_AUTH_BYTECNT2 0x398 | ||
68 | #define REG_AUTH_BYTECNT3 0x39c | ||
69 | #define REG_AUTH_EXP_MAC0 0x3a0 | ||
70 | #define REG_AUTH_EXP_MAC1 0x3a4 | ||
71 | #define REG_AUTH_EXP_MAC2 0x3a8 | ||
72 | #define REG_AUTH_EXP_MAC3 0x3ac | ||
73 | #define REG_AUTH_EXP_MAC4 0x3b0 | ||
74 | #define REG_AUTH_EXP_MAC5 0x3b4 | ||
75 | #define REG_AUTH_EXP_MAC6 0x3b8 | ||
76 | #define REG_AUTH_EXP_MAC7 0x3bc | ||
77 | #define REG_CONFIG 0x400 | ||
78 | #define REG_GOPROC_QC_KEY 0x1000 | ||
79 | #define REG_GOPROC_OEM_KEY 0x2000 | ||
80 | #define REG_ENCR_KEY0 0x3000 | ||
81 | #define REG_ENCR_KEY1 0x3004 | ||
82 | #define REG_ENCR_KEY2 0x3008 | ||
83 | #define REG_ENCR_KEY3 0x300c | ||
84 | #define REG_ENCR_KEY4 0x3010 | ||
85 | #define REG_ENCR_KEY5 0x3014 | ||
86 | #define REG_ENCR_KEY6 0x3018 | ||
87 | #define REG_ENCR_KEY7 0x301c | ||
88 | #define REG_ENCR_XTS_KEY0 0x3020 | ||
89 | #define REG_ENCR_XTS_KEY1 0x3024 | ||
90 | #define REG_ENCR_XTS_KEY2 0x3028 | ||
91 | #define REG_ENCR_XTS_KEY3 0x302c | ||
92 | #define REG_ENCR_XTS_KEY4 0x3030 | ||
93 | #define REG_ENCR_XTS_KEY5 0x3034 | ||
94 | #define REG_ENCR_XTS_KEY6 0x3038 | ||
95 | #define REG_ENCR_XTS_KEY7 0x303c | ||
96 | #define REG_AUTH_KEY0 0x3040 | ||
97 | #define REG_AUTH_KEY1 0x3044 | ||
98 | #define REG_AUTH_KEY2 0x3048 | ||
99 | #define REG_AUTH_KEY3 0x304c | ||
100 | #define REG_AUTH_KEY4 0x3050 | ||
101 | #define REG_AUTH_KEY5 0x3054 | ||
102 | #define REG_AUTH_KEY6 0x3058 | ||
103 | #define REG_AUTH_KEY7 0x305c | ||
104 | #define REG_AUTH_KEY8 0x3060 | ||
105 | #define REG_AUTH_KEY9 0x3064 | ||
106 | #define REG_AUTH_KEY10 0x3068 | ||
107 | #define REG_AUTH_KEY11 0x306c | ||
108 | #define REG_AUTH_KEY12 0x3070 | ||
109 | #define REG_AUTH_KEY13 0x3074 | ||
110 | #define REG_AUTH_KEY14 0x3078 | ||
111 | #define REG_AUTH_KEY15 0x307c | ||
112 | |||
113 | /* Register bits - REG_VERSION */ | ||
114 | #define CORE_STEP_REV_SHIFT 0 | ||
115 | #define CORE_STEP_REV_MASK GENMASK(15, 0) | ||
116 | #define CORE_MINOR_REV_SHIFT 16 | ||
117 | #define CORE_MINOR_REV_MASK GENMASK(23, 16) | ||
118 | #define CORE_MAJOR_REV_SHIFT 24 | ||
119 | #define CORE_MAJOR_REV_MASK GENMASK(31, 24) | ||
120 | |||
121 | /* Register bits - REG_STATUS */ | ||
122 | #define MAC_FAILED_SHIFT 31 | ||
123 | #define DOUT_SIZE_AVAIL_SHIFT 26 | ||
124 | #define DOUT_SIZE_AVAIL_MASK GENMASK(30, 26) | ||
125 | #define DIN_SIZE_AVAIL_SHIFT 21 | ||
126 | #define DIN_SIZE_AVAIL_MASK GENMASK(25, 21) | ||
127 | #define HSD_ERR_SHIFT 20 | ||
128 | #define ACCESS_VIOL_SHIFT 19 | ||
129 | #define PIPE_ACTIVE_ERR_SHIFT 18 | ||
130 | #define CFG_CHNG_ERR_SHIFT 17 | ||
131 | #define DOUT_ERR_SHIFT 16 | ||
132 | #define DIN_ERR_SHIFT 15 | ||
133 | #define AXI_ERR_SHIFT 14 | ||
134 | #define CRYPTO_STATE_SHIFT 10 | ||
135 | #define CRYPTO_STATE_MASK GENMASK(13, 10) | ||
136 | #define ENCR_BUSY_SHIFT 9 | ||
137 | #define AUTH_BUSY_SHIFT 8 | ||
138 | #define DOUT_INTR_SHIFT 7 | ||
139 | #define DIN_INTR_SHIFT 6 | ||
140 | #define OP_DONE_INTR_SHIFT 5 | ||
141 | #define ERR_INTR_SHIFT 4 | ||
142 | #define DOUT_RDY_SHIFT 3 | ||
143 | #define DIN_RDY_SHIFT 2 | ||
144 | #define OPERATION_DONE_SHIFT 1 | ||
145 | #define SW_ERR_SHIFT 0 | ||
146 | |||
147 | /* Register bits - REG_STATUS2 */ | ||
148 | #define AXI_EXTRA_SHIFT 1 | ||
149 | #define LOCKED_SHIFT 2 | ||
150 | |||
151 | /* Register bits - REG_CONFIG */ | ||
152 | #define REQ_SIZE_SHIFT 17 | ||
153 | #define REQ_SIZE_MASK GENMASK(20, 17) | ||
154 | #define REQ_SIZE_ENUM_1_BEAT 0 | ||
155 | #define REQ_SIZE_ENUM_2_BEAT 1 | ||
156 | #define REQ_SIZE_ENUM_3_BEAT 2 | ||
157 | #define REQ_SIZE_ENUM_4_BEAT 3 | ||
158 | #define REQ_SIZE_ENUM_5_BEAT 4 | ||
159 | #define REQ_SIZE_ENUM_6_BEAT 5 | ||
160 | #define REQ_SIZE_ENUM_7_BEAT 6 | ||
161 | #define REQ_SIZE_ENUM_8_BEAT 7 | ||
162 | #define REQ_SIZE_ENUM_9_BEAT 8 | ||
163 | #define REQ_SIZE_ENUM_10_BEAT 9 | ||
164 | #define REQ_SIZE_ENUM_11_BEAT 10 | ||
165 | #define REQ_SIZE_ENUM_12_BEAT 11 | ||
166 | #define REQ_SIZE_ENUM_13_BEAT 12 | ||
167 | #define REQ_SIZE_ENUM_14_BEAT 13 | ||
168 | #define REQ_SIZE_ENUM_15_BEAT 14 | ||
169 | #define REQ_SIZE_ENUM_16_BEAT 15 | ||
170 | |||
171 | #define MAX_QUEUED_REQ_SHIFT 14 | ||
172 | #define MAX_QUEUED_REQ_MASK GENMASK(24, 16) | ||
173 | #define ENUM_1_QUEUED_REQS 0 | ||
174 | #define ENUM_2_QUEUED_REQS 1 | ||
175 | #define ENUM_3_QUEUED_REQS 2 | ||
176 | |||
177 | #define IRQ_ENABLES_SHIFT 10 | ||
178 | #define IRQ_ENABLES_MASK GENMASK(13, 10) | ||
179 | |||
180 | #define LITTLE_ENDIAN_MODE_SHIFT 9 | ||
181 | #define PIPE_SET_SELECT_SHIFT 5 | ||
182 | #define PIPE_SET_SELECT_MASK GENMASK(8, 5) | ||
183 | |||
184 | #define HIGH_SPD_EN_N_SHIFT 4 | ||
185 | #define MASK_DOUT_INTR_SHIFT 3 | ||
186 | #define MASK_DIN_INTR_SHIFT 2 | ||
187 | #define MASK_OP_DONE_INTR_SHIFT 1 | ||
188 | #define MASK_ERR_INTR_SHIFT 0 | ||
189 | |||
190 | /* Register bits - REG_AUTH_SEG_CFG */ | ||
191 | #define COMP_EXP_MAC_SHIFT 24 | ||
192 | #define COMP_EXP_MAC_DISABLED 0 | ||
193 | #define COMP_EXP_MAC_ENABLED 1 | ||
194 | |||
195 | #define F9_DIRECTION_SHIFT 23 | ||
196 | #define F9_DIRECTION_UPLINK 0 | ||
197 | #define F9_DIRECTION_DOWNLINK 1 | ||
198 | |||
199 | #define AUTH_NONCE_NUM_WORDS_SHIFT 20 | ||
200 | #define AUTH_NONCE_NUM_WORDS_MASK GENMASK(22, 20) | ||
201 | |||
202 | #define USE_PIPE_KEY_AUTH_SHIFT 19 | ||
203 | #define USE_HW_KEY_AUTH_SHIFT 18 | ||
204 | #define AUTH_FIRST_SHIFT 17 | ||
205 | #define AUTH_LAST_SHIFT 16 | ||
206 | |||
207 | #define AUTH_POS_SHIFT 14 | ||
208 | #define AUTH_POS_MASK GENMASK(15, 14) | ||
209 | #define AUTH_POS_BEFORE 0 | ||
210 | #define AUTH_POS_AFTER 1 | ||
211 | |||
212 | #define AUTH_SIZE_SHIFT 9 | ||
213 | #define AUTH_SIZE_MASK GENMASK(13, 9) | ||
214 | #define AUTH_SIZE_SHA1 0 | ||
215 | #define AUTH_SIZE_SHA256 1 | ||
216 | #define AUTH_SIZE_ENUM_1_BYTES 0 | ||
217 | #define AUTH_SIZE_ENUM_2_BYTES 1 | ||
218 | #define AUTH_SIZE_ENUM_3_BYTES 2 | ||
219 | #define AUTH_SIZE_ENUM_4_BYTES 3 | ||
220 | #define AUTH_SIZE_ENUM_5_BYTES 4 | ||
221 | #define AUTH_SIZE_ENUM_6_BYTES 5 | ||
222 | #define AUTH_SIZE_ENUM_7_BYTES 6 | ||
223 | #define AUTH_SIZE_ENUM_8_BYTES 7 | ||
224 | #define AUTH_SIZE_ENUM_9_BYTES 8 | ||
225 | #define AUTH_SIZE_ENUM_10_BYTES 9 | ||
226 | #define AUTH_SIZE_ENUM_11_BYTES 10 | ||
227 | #define AUTH_SIZE_ENUM_12_BYTES 11 | ||
228 | #define AUTH_SIZE_ENUM_13_BYTES 12 | ||
229 | #define AUTH_SIZE_ENUM_14_BYTES 13 | ||
230 | #define AUTH_SIZE_ENUM_15_BYTES 14 | ||
231 | #define AUTH_SIZE_ENUM_16_BYTES 15 | ||
232 | |||
233 | #define AUTH_MODE_SHIFT 6 | ||
234 | #define AUTH_MODE_MASK GENMASK(8, 6) | ||
235 | #define AUTH_MODE_HASH 0 | ||
236 | #define AUTH_MODE_HMAC 1 | ||
237 | #define AUTH_MODE_CCM 0 | ||
238 | #define AUTH_MODE_CMAC 1 | ||
239 | |||
240 | #define AUTH_KEY_SIZE_SHIFT 3 | ||
241 | #define AUTH_KEY_SIZE_MASK GENMASK(5, 3) | ||
242 | #define AUTH_KEY_SZ_AES128 0 | ||
243 | #define AUTH_KEY_SZ_AES256 2 | ||
244 | |||
245 | #define AUTH_ALG_SHIFT 0 | ||
246 | #define AUTH_ALG_MASK GENMASK(2, 0) | ||
247 | #define AUTH_ALG_NONE 0 | ||
248 | #define AUTH_ALG_SHA 1 | ||
249 | #define AUTH_ALG_AES 2 | ||
250 | #define AUTH_ALG_KASUMI 3 | ||
251 | #define AUTH_ALG_SNOW3G 4 | ||
252 | #define AUTH_ALG_ZUC 5 | ||
253 | |||
254 | /* Register bits - REG_ENCR_XTS_DU_SIZE */ | ||
255 | #define ENCR_XTS_DU_SIZE_SHIFT 0 | ||
256 | #define ENCR_XTS_DU_SIZE_MASK GENMASK(19, 0) | ||
257 | |||
258 | /* Register bits - REG_ENCR_SEG_CFG */ | ||
259 | #define F8_KEYSTREAM_ENABLE_SHIFT 17 | ||
260 | #define F8_KEYSTREAM_DISABLED 0 | ||
261 | #define F8_KEYSTREAM_ENABLED 1 | ||
262 | |||
263 | #define F8_DIRECTION_SHIFT 16 | ||
264 | #define F8_DIRECTION_UPLINK 0 | ||
265 | #define F8_DIRECTION_DOWNLINK 1 | ||
266 | |||
267 | #define USE_PIPE_KEY_ENCR_SHIFT 15 | ||
268 | #define USE_PIPE_KEY_ENCR_ENABLED 1 | ||
269 | #define USE_KEY_REGISTERS 0 | ||
270 | |||
271 | #define USE_HW_KEY_ENCR_SHIFT 14 | ||
272 | #define USE_KEY_REG 0 | ||
273 | #define USE_HW_KEY 1 | ||
274 | |||
275 | #define LAST_CCM_SHIFT 13 | ||
276 | #define LAST_CCM_XFR 1 | ||
277 | #define INTERM_CCM_XFR 0 | ||
278 | |||
279 | #define CNTR_ALG_SHIFT 11 | ||
280 | #define CNTR_ALG_MASK GENMASK(12, 11) | ||
281 | #define CNTR_ALG_NIST 0 | ||
282 | |||
283 | #define ENCODE_SHIFT 10 | ||
284 | |||
285 | #define ENCR_MODE_SHIFT 6 | ||
286 | #define ENCR_MODE_MASK GENMASK(9, 6) | ||
287 | #define ENCR_MODE_ECB 0 | ||
288 | #define ENCR_MODE_CBC 1 | ||
289 | #define ENCR_MODE_CTR 2 | ||
290 | #define ENCR_MODE_XTS 3 | ||
291 | #define ENCR_MODE_CCM 4 | ||
292 | |||
293 | #define ENCR_KEY_SZ_SHIFT 3 | ||
294 | #define ENCR_KEY_SZ_MASK GENMASK(5, 3) | ||
295 | #define ENCR_KEY_SZ_DES 0 | ||
296 | #define ENCR_KEY_SZ_3DES 1 | ||
297 | #define ENCR_KEY_SZ_AES128 0 | ||
298 | #define ENCR_KEY_SZ_AES256 2 | ||
299 | |||
300 | #define ENCR_ALG_SHIFT 0 | ||
301 | #define ENCR_ALG_MASK GENMASK(2, 0) | ||
302 | #define ENCR_ALG_NONE 0 | ||
303 | #define ENCR_ALG_DES 1 | ||
304 | #define ENCR_ALG_AES 2 | ||
305 | #define ENCR_ALG_KASUMI 4 | ||
306 | #define ENCR_ALG_SNOW_3G 5 | ||
307 | #define ENCR_ALG_ZUC 6 | ||
308 | |||
309 | /* Register bits - REG_GOPROC */ | ||
310 | #define GO_SHIFT 0 | ||
311 | #define CLR_CNTXT_SHIFT 1 | ||
312 | #define RESULTS_DUMP_SHIFT 2 | ||
313 | |||
314 | /* Register bits - REG_ENGINES_AVAIL */ | ||
315 | #define ENCR_AES_SEL_SHIFT 0 | ||
316 | #define DES_SEL_SHIFT 1 | ||
317 | #define ENCR_SNOW3G_SEL_SHIFT 2 | ||
318 | #define ENCR_KASUMI_SEL_SHIFT 3 | ||
319 | #define SHA_SEL_SHIFT 4 | ||
320 | #define SHA512_SEL_SHIFT 5 | ||
321 | #define AUTH_AES_SEL_SHIFT 6 | ||
322 | #define AUTH_SNOW3G_SEL_SHIFT 7 | ||
323 | #define AUTH_KASUMI_SEL_SHIFT 8 | ||
324 | #define BAM_PIPE_SETS_SHIFT 9 | ||
325 | #define BAM_PIPE_SETS_MASK GENMASK(12, 9) | ||
326 | #define AXI_WR_BEATS_SHIFT 13 | ||
327 | #define AXI_WR_BEATS_MASK GENMASK(18, 13) | ||
328 | #define AXI_RD_BEATS_SHIFT 19 | ||
329 | #define AXI_RD_BEATS_MASK GENMASK(24, 19) | ||
330 | #define ENCR_ZUC_SEL_SHIFT 26 | ||
331 | #define AUTH_ZUC_SEL_SHIFT 27 | ||
332 | #define ZUC_ENABLE_SHIFT 28 | ||
333 | |||
334 | #endif /* _REGS_V5_H_ */ | ||
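
As a worked example of how these fields compose (the registers are actually programmed in common.c, outside this hunk, and the real helper may set additional bits), an AES-128-CBC encryption segment configuration would be built roughly as:

    u32 encr_cfg = ENCR_ALG_AES << ENCR_ALG_SHIFT |          /* 0x002 */
    	           ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT | /* 0x000 */
    	           ENCR_MODE_CBC << ENCR_MODE_SHIFT |        /* 0x040 */
    	           BIT(ENCODE_SHIFT);                        /* 0x400 */
    /* encr_cfg == 0x442, destined for REG_ENCR_SEG_CFG */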
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c new file mode 100644 index 000000000000..f3385934eed2 --- /dev/null +++ b/drivers/crypto/qce/sha.c | |||
@@ -0,0 +1,588 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <crypto/internal/hash.h> | ||
17 | |||
18 | #include "common.h" | ||
19 | #include "core.h" | ||
20 | #include "sha.h" | ||
21 | |||
22 | /* crypto hw padding constant for first operation */ | ||
23 | #define SHA_PADDING 64 | ||
24 | #define SHA_PADDING_MASK (SHA_PADDING - 1) | ||
25 | |||
26 | static LIST_HEAD(ahash_algs); | ||
27 | |||
28 | static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = { | ||
29 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0 | ||
30 | }; | ||
31 | |||
32 | static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = { | ||
33 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | ||
34 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 | ||
35 | }; | ||
36 | |||
37 | static void qce_ahash_done(void *data) | ||
38 | { | ||
39 | struct crypto_async_request *async_req = data; | ||
40 | struct ahash_request *req = ahash_request_cast(async_req); | ||
41 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
42 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
43 | struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); | ||
44 | struct qce_device *qce = tmpl->qce; | ||
45 | struct qce_result_dump *result = qce->dma.result_buf; | ||
46 | unsigned int digestsize = crypto_ahash_digestsize(ahash); | ||
47 | int error; | ||
48 | u32 status; | ||
49 | |||
50 | error = qce_dma_terminate_all(&qce->dma); | ||
51 | if (error) | ||
52 | dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); | ||
53 | |||
54 | qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, | ||
55 | rctx->src_chained); | ||
56 | qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); | ||
57 | |||
58 | memcpy(rctx->digest, result->auth_iv, digestsize); | ||
59 | if (req->result) | ||
60 | memcpy(req->result, result->auth_iv, digestsize); | ||
61 | |||
62 | rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]); | ||
63 | rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]); | ||
64 | |||
65 | error = qce_check_status(qce, &status); | ||
66 | if (error < 0) | ||
67 | dev_dbg(qce->dev, "ahash operation error (%x)\n", status); | ||
68 | |||
69 | req->src = rctx->src_orig; | ||
70 | req->nbytes = rctx->nbytes_orig; | ||
71 | rctx->last_blk = false; | ||
72 | rctx->first_blk = false; | ||
73 | |||
74 | qce->async_req_done(tmpl->qce, error); | ||
75 | } | ||
76 | |||
77 | static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) | ||
78 | { | ||
79 | struct ahash_request *req = ahash_request_cast(async_req); | ||
80 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
81 | struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm); | ||
82 | struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); | ||
83 | struct qce_device *qce = tmpl->qce; | ||
84 | unsigned long flags = rctx->flags; | ||
85 | int ret; | ||
86 | |||
87 | if (IS_SHA_HMAC(flags)) { | ||
88 | rctx->authkey = ctx->authkey; | ||
89 | rctx->authklen = QCE_SHA_HMAC_KEY_SIZE; | ||
90 | } else if (IS_CMAC(flags)) { | ||
91 | rctx->authkey = ctx->authkey; | ||
92 | rctx->authklen = AES_KEYSIZE_128; | ||
93 | } | ||
94 | |||
95 | rctx->src_nents = qce_countsg(req->src, req->nbytes, | ||
96 | &rctx->src_chained); | ||
97 | ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, | ||
98 | rctx->src_chained); | ||
99 | if (ret < 0) | ||
100 | return ret; | ||
101 | |||
102 | sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); | ||
103 | |||
104 | ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); | ||
105 | if (ret < 0) | ||
106 | goto error_unmap_src; | ||
107 | |||
108 | ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, | ||
109 | &rctx->result_sg, 1, qce_ahash_done, async_req); | ||
110 | if (ret) | ||
111 | goto error_unmap_dst; | ||
112 | |||
113 | qce_dma_issue_pending(&qce->dma); | ||
114 | |||
115 | ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0); | ||
116 | if (ret) | ||
117 | goto error_terminate; | ||
118 | |||
119 | return 0; | ||
120 | |||
121 | error_terminate: | ||
122 | qce_dma_terminate_all(&qce->dma); | ||
123 | error_unmap_dst: | ||
124 | qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); | ||
125 | error_unmap_src: | ||
126 | qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, | ||
127 | rctx->src_chained); | ||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | static int qce_ahash_init(struct ahash_request *req) | ||
132 | { | ||
133 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
134 | struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); | ||
135 | const u32 *std_iv = tmpl->std_iv; | ||
136 | |||
137 | memset(rctx, 0, sizeof(*rctx)); | ||
138 | rctx->first_blk = true; | ||
139 | rctx->last_blk = false; | ||
140 | rctx->flags = tmpl->alg_flags; | ||
141 | memcpy(rctx->digest, std_iv, sizeof(rctx->digest)); | ||
142 | |||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static int qce_ahash_export(struct ahash_request *req, void *out) | ||
147 | { | ||
148 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
149 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
150 | unsigned long flags = rctx->flags; | ||
151 | unsigned int digestsize = crypto_ahash_digestsize(ahash); | ||
152 | unsigned int blocksize = | ||
153 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); | ||
154 | |||
155 | if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { | ||
156 | struct sha1_state *out_state = out; | ||
157 | |||
158 | out_state->count = rctx->count; | ||
159 | qce_cpu_to_be32p_array((__be32 *)out_state->state, | ||
160 | rctx->digest, digestsize); | ||
161 | memcpy(out_state->buffer, rctx->buf, blocksize); | ||
162 | } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { | ||
163 | struct sha256_state *out_state = out; | ||
164 | |||
165 | out_state->count = rctx->count; | ||
166 | qce_cpu_to_be32p_array((__be32 *)out_state->state, | ||
167 | rctx->digest, digestsize); | ||
168 | memcpy(out_state->buf, rctx->buf, blocksize); | ||
169 | } else { | ||
170 | return -EINVAL; | ||
171 | } | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int qce_import_common(struct ahash_request *req, u64 in_count, | ||
177 | const u32 *state, const u8 *buffer, bool hmac) | ||
178 | { | ||
179 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
180 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
181 | unsigned int digestsize = crypto_ahash_digestsize(ahash); | ||
182 | unsigned int blocksize; | ||
183 | u64 count = in_count; | ||
184 | |||
185 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); | ||
186 | rctx->count = in_count; | ||
187 | memcpy(rctx->buf, buffer, blocksize); | ||
188 | |||
189 | if (in_count <= blocksize) { | ||
190 | rctx->first_blk = 1; | ||
191 | } else { | ||
192 | rctx->first_blk = 0; | ||
193 | /* | ||
194 | * For HMAC, the hardware adds padding when the first-block flag | ||
195 | * is set. Therefore the byte count must be incremented by 64 | ||
196 | * after the first block operation. | ||
197 | */ | ||
198 | if (hmac) | ||
199 | count += SHA_PADDING; | ||
200 | } | ||
201 | |||
202 | rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK); | ||
203 | rctx->byte_count[1] = (__force __be32)(count >> 32); | ||
204 | qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state, | ||
205 | digestsize); | ||
206 | rctx->buflen = (unsigned int)(in_count & (blocksize - 1)); | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static int qce_ahash_import(struct ahash_request *req, const void *in) | ||
212 | { | ||
213 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
214 | unsigned long flags = rctx->flags; | ||
215 | bool hmac = IS_SHA_HMAC(flags); | ||
216 | int ret = -EINVAL; | ||
217 | |||
218 | if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { | ||
219 | const struct sha1_state *state = in; | ||
220 | |||
221 | ret = qce_import_common(req, state->count, state->state, | ||
222 | state->buffer, hmac); | ||
223 | } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { | ||
224 | const struct sha256_state *state = in; | ||
225 | |||
226 | ret = qce_import_common(req, state->count, state->state, | ||
227 | state->buf, hmac); | ||
228 | } | ||
229 | |||
230 | return ret; | ||
231 | } | ||
232 | |||
233 | static int qce_ahash_update(struct ahash_request *req) | ||
234 | { | ||
235 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
236 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
237 | struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); | ||
238 | struct qce_device *qce = tmpl->qce; | ||
239 | struct scatterlist *sg_last, *sg; | ||
240 | unsigned int total, len; | ||
241 | unsigned int hash_later; | ||
242 | unsigned int nbytes; | ||
243 | unsigned int blocksize; | ||
244 | |||
245 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
246 | rctx->count += req->nbytes; | ||
247 | |||
248 | /* account for any bytes buffered by previous updates */ | ||
249 | total = req->nbytes + rctx->buflen; | ||
250 | |||
251 | if (total <= blocksize) { | ||
252 | scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src, | ||
253 | 0, req->nbytes, 0); | ||
254 | rctx->buflen += req->nbytes; | ||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | /* save the original req structure fields */ | ||
259 | rctx->src_orig = req->src; | ||
260 | rctx->nbytes_orig = req->nbytes; | ||
261 | |||
262 | /* | ||
263 | * If data is left over from a previous update, copy it into the | ||
264 | * temporary buffer so it is combined with the current request bytes. | ||
265 | */ | ||
266 | if (rctx->buflen) | ||
267 | memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); | ||
268 | |||
269 | /* calculate how many bytes will be hashed later */ | ||
270 | hash_later = total % blocksize; | ||
271 | if (hash_later) { | ||
272 | unsigned int src_offset = req->nbytes - hash_later; | ||
273 | scatterwalk_map_and_copy(rctx->buf, req->src, src_offset, | ||
274 | hash_later, 0); | ||
275 | } | ||
276 | |||
277 | /* here nbytes is a multiple of the blocksize */ | ||
278 | nbytes = total - hash_later; | ||
279 | |||
280 | len = rctx->buflen; | ||
281 | sg = sg_last = req->src; | ||
282 | |||
283 | while (len < nbytes && sg) { | ||
284 | if (len + sg_dma_len(sg) > nbytes) | ||
285 | break; | ||
286 | len += sg_dma_len(sg); | ||
287 | sg_last = sg; | ||
288 | sg = scatterwalk_sg_next(sg); | ||
289 | } | ||
290 | |||
291 | if (!sg_last) | ||
292 | return -EINVAL; | ||
293 | |||
294 | sg_mark_end(sg_last); | ||
295 | |||
296 | if (rctx->buflen) { | ||
297 | sg_init_table(rctx->sg, 2); | ||
298 | sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); | ||
299 | scatterwalk_sg_chain(rctx->sg, 2, req->src); | ||
300 | req->src = rctx->sg; | ||
301 | } | ||
302 | |||
303 | req->nbytes = nbytes; | ||
304 | rctx->buflen = hash_later; | ||
305 | |||
306 | return qce->async_req_enqueue(tmpl->qce, &req->base); | ||
307 | } | ||
308 | |||
309 | static int qce_ahash_final(struct ahash_request *req) | ||
310 | { | ||
311 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
312 | struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); | ||
313 | struct qce_device *qce = tmpl->qce; | ||
314 | |||
315 | if (!rctx->buflen) | ||
316 | return 0; | ||
317 | |||
318 | rctx->last_blk = true; | ||
319 | |||
320 | rctx->src_orig = req->src; | ||
321 | rctx->nbytes_orig = req->nbytes; | ||
322 | |||
323 | memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); | ||
324 | sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen); | ||
325 | |||
326 | req->src = rctx->sg; | ||
327 | req->nbytes = rctx->buflen; | ||
328 | |||
329 | return qce->async_req_enqueue(tmpl->qce, &req->base); | ||
330 | } | ||
331 | |||
332 | static int qce_ahash_digest(struct ahash_request *req) | ||
333 | { | ||
334 | struct qce_sha_reqctx *rctx = ahash_request_ctx(req); | ||
335 | struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); | ||
336 | struct qce_device *qce = tmpl->qce; | ||
337 | int ret; | ||
338 | |||
339 | ret = qce_ahash_init(req); | ||
340 | if (ret) | ||
341 | return ret; | ||
342 | |||
343 | rctx->src_orig = req->src; | ||
344 | rctx->nbytes_orig = req->nbytes; | ||
345 | rctx->first_blk = true; | ||
346 | rctx->last_blk = true; | ||
347 | |||
348 | return qce->async_req_enqueue(tmpl->qce, &req->base); | ||
349 | } | ||
350 | |||
351 | struct qce_ahash_result { | ||
352 | struct completion completion; | ||
353 | int error; | ||
354 | }; | ||
355 | |||
356 | static void qce_digest_complete(struct crypto_async_request *req, int error) | ||
357 | { | ||
358 | struct qce_ahash_result *result = req->data; | ||
359 | |||
360 | if (error == -EINPROGRESS) | ||
361 | return; | ||
362 | |||
363 | result->error = error; | ||
364 | complete(&result->completion); | ||
365 | } | ||
366 | |||
367 | static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
368 | unsigned int keylen) | ||
369 | { | ||
370 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | ||
371 | struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); | ||
372 | struct qce_ahash_result result; | ||
373 | struct ahash_request *req; | ||
374 | struct scatterlist sg; | ||
375 | unsigned int blocksize; | ||
376 | struct crypto_ahash *ahash_tfm; | ||
377 | u8 *buf; | ||
378 | int ret; | ||
379 | const char *alg_name; | ||
380 | |||
381 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
382 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); | ||
383 | |||
384 | if (keylen <= blocksize) { | ||
385 | memcpy(ctx->authkey, key, keylen); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | if (digestsize == SHA1_DIGEST_SIZE) | ||
390 | alg_name = "sha1-qce"; | ||
391 | else if (digestsize == SHA256_DIGEST_SIZE) | ||
392 | alg_name = "sha256-qce"; | ||
393 | else | ||
394 | return -EINVAL; | ||
395 | |||
396 | ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH, | ||
397 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
398 | if (IS_ERR(ahash_tfm)) | ||
399 | return PTR_ERR(ahash_tfm); | ||
400 | |||
401 | req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); | ||
402 | if (!req) { | ||
403 | ret = -ENOMEM; | ||
404 | goto err_free_ahash; | ||
405 | } | ||
406 | |||
407 | init_completion(&result.completion); | ||
408 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
409 | qce_digest_complete, &result); | ||
410 | crypto_ahash_clear_flags(ahash_tfm, ~0); | ||
411 | |||
412 | buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); | ||
413 | if (!buf) { | ||
414 | ret = -ENOMEM; | ||
415 | goto err_free_req; | ||
416 | } | ||
417 | |||
418 | memcpy(buf, key, keylen); | ||
419 | sg_init_one(&sg, buf, keylen); | ||
420 | ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); | ||
421 | |||
422 | ret = crypto_ahash_digest(req); | ||
423 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
424 | ret = wait_for_completion_interruptible(&result.completion); | ||
425 | if (!ret) | ||
426 | ret = result.error; | ||
427 | } | ||
428 | |||
429 | if (ret) | ||
430 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
431 | |||
432 | kfree(buf); | ||
433 | err_free_req: | ||
434 | ahash_request_free(req); | ||
435 | err_free_ahash: | ||
436 | crypto_free_ahash(ahash_tfm); | ||
437 | return ret; | ||
438 | } | ||
439 | |||
440 | static int qce_ahash_cra_init(struct crypto_tfm *tfm) | ||
441 | { | ||
442 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
443 | struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm); | ||
444 | |||
445 | crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx)); | ||
446 | memset(ctx, 0, sizeof(*ctx)); | ||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | struct qce_ahash_def { | ||
451 | unsigned long flags; | ||
452 | const char *name; | ||
453 | const char *drv_name; | ||
454 | unsigned int digestsize; | ||
455 | unsigned int blocksize; | ||
456 | unsigned int statesize; | ||
457 | const u32 *std_iv; | ||
458 | }; | ||
459 | |||
460 | static const struct qce_ahash_def ahash_def[] = { | ||
461 | { | ||
462 | .flags = QCE_HASH_SHA1, | ||
463 | .name = "sha1", | ||
464 | .drv_name = "sha1-qce", | ||
465 | .digestsize = SHA1_DIGEST_SIZE, | ||
466 | .blocksize = SHA1_BLOCK_SIZE, | ||
467 | .statesize = sizeof(struct sha1_state), | ||
468 | .std_iv = std_iv_sha1, | ||
469 | }, | ||
470 | { | ||
471 | .flags = QCE_HASH_SHA256, | ||
472 | .name = "sha256", | ||
473 | .drv_name = "sha256-qce", | ||
474 | .digestsize = SHA256_DIGEST_SIZE, | ||
475 | .blocksize = SHA256_BLOCK_SIZE, | ||
476 | .statesize = sizeof(struct sha256_state), | ||
477 | .std_iv = std_iv_sha256, | ||
478 | }, | ||
479 | { | ||
480 | .flags = QCE_HASH_SHA1_HMAC, | ||
481 | .name = "hmac(sha1)", | ||
482 | .drv_name = "hmac-sha1-qce", | ||
483 | .digestsize = SHA1_DIGEST_SIZE, | ||
484 | .blocksize = SHA1_BLOCK_SIZE, | ||
485 | .statesize = sizeof(struct sha1_state), | ||
486 | .std_iv = std_iv_sha1, | ||
487 | }, | ||
488 | { | ||
489 | .flags = QCE_HASH_SHA256_HMAC, | ||
490 | .name = "hmac(sha256)", | ||
491 | .drv_name = "hmac-sha256-qce", | ||
492 | .digestsize = SHA256_DIGEST_SIZE, | ||
493 | .blocksize = SHA256_BLOCK_SIZE, | ||
494 | .statesize = sizeof(struct sha256_state), | ||
495 | .std_iv = std_iv_sha256, | ||
496 | }, | ||
497 | }; | ||
498 | |||
499 | static int qce_ahash_register_one(const struct qce_ahash_def *def, | ||
500 | struct qce_device *qce) | ||
501 | { | ||
502 | struct qce_alg_template *tmpl; | ||
503 | struct ahash_alg *alg; | ||
504 | struct crypto_alg *base; | ||
505 | int ret; | ||
506 | |||
507 | tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); | ||
508 | if (!tmpl) | ||
509 | return -ENOMEM; | ||
510 | |||
511 | tmpl->std_iv = def->std_iv; | ||
512 | |||
513 | alg = &tmpl->alg.ahash; | ||
514 | alg->init = qce_ahash_init; | ||
515 | alg->update = qce_ahash_update; | ||
516 | alg->final = qce_ahash_final; | ||
517 | alg->digest = qce_ahash_digest; | ||
518 | alg->export = qce_ahash_export; | ||
519 | alg->import = qce_ahash_import; | ||
520 | if (IS_SHA_HMAC(def->flags)) | ||
521 | alg->setkey = qce_ahash_hmac_setkey; | ||
522 | alg->halg.digestsize = def->digestsize; | ||
523 | alg->halg.statesize = def->statesize; | ||
524 | |||
525 | base = &alg->halg.base; | ||
526 | base->cra_blocksize = def->blocksize; | ||
527 | base->cra_priority = 300; | ||
528 | base->cra_flags = CRYPTO_ALG_ASYNC; | ||
529 | base->cra_ctxsize = sizeof(struct qce_sha_ctx); | ||
530 | base->cra_alignmask = 0; | ||
531 | base->cra_module = THIS_MODULE; | ||
532 | base->cra_init = qce_ahash_cra_init; | ||
533 | INIT_LIST_HEAD(&base->cra_list); | ||
534 | |||
535 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); | ||
536 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
537 | def->drv_name); | ||
538 | |||
539 | INIT_LIST_HEAD(&tmpl->entry); | ||
540 | tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH; | ||
541 | tmpl->alg_flags = def->flags; | ||
542 | tmpl->qce = qce; | ||
543 | |||
544 | ret = crypto_register_ahash(alg); | ||
545 | if (ret) { | ||
546 | kfree(tmpl); | ||
547 | dev_err(qce->dev, "%s registration failed\n", base->cra_name); | ||
548 | return ret; | ||
549 | } | ||
550 | |||
551 | list_add_tail(&tmpl->entry, &ahash_algs); | ||
552 | dev_dbg(qce->dev, "%s is registered\n", base->cra_name); | ||
553 | return 0; | ||
554 | } | ||
555 | |||
556 | static void qce_ahash_unregister(struct qce_device *qce) | ||
557 | { | ||
558 | struct qce_alg_template *tmpl, *n; | ||
559 | |||
560 | list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) { | ||
561 | crypto_unregister_ahash(&tmpl->alg.ahash); | ||
562 | list_del(&tmpl->entry); | ||
563 | kfree(tmpl); | ||
564 | } | ||
565 | } | ||
566 | |||
567 | static int qce_ahash_register(struct qce_device *qce) | ||
568 | { | ||
569 | int ret, i; | ||
570 | |||
571 | for (i = 0; i < ARRAY_SIZE(ahash_def); i++) { | ||
572 | ret = qce_ahash_register_one(&ahash_def[i], qce); | ||
573 | if (ret) | ||
574 | goto err; | ||
575 | } | ||
576 | |||
577 | return 0; | ||
578 | err: | ||
579 | qce_ahash_unregister(qce); | ||
580 | return ret; | ||
581 | } | ||
582 | |||
583 | const struct qce_algo_ops ahash_ops = { | ||
584 | .type = CRYPTO_ALG_TYPE_AHASH, | ||
585 | .register_algs = qce_ahash_register, | ||
586 | .unregister_algs = qce_ahash_unregister, | ||
587 | .async_req_handle = qce_ahash_async_req_handle, | ||
588 | }; | ||
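The ops table above is the only thing sha.c exports; the driver core is expected to walk a list of such tables. A minimal sketch of that consumer loop with rollback on partial failure — qce_register_algs() and the qce_ops[] array are illustrative names, not taken from this patch:

    /* Sketch: drive each algorithm family's register/unregister hooks. */
    static const struct qce_algo_ops *qce_ops[] = {
            &ahash_ops,
            /* &ablkcipher_ops, ... */
    };

    static int qce_register_algs(struct qce_device *qce)
    {
            int i, ret;

            for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
                    ret = qce_ops[i]->register_algs(qce);
                    if (ret)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)
                    qce_ops[i]->unregister_algs(qce);
            return ret;
    }

Note that qce_ahash_register() already unwinds its own partial registrations, so an outer loop like this only needs to unwind whole families.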
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h new file mode 100644 index 000000000000..286f0d5397f3 --- /dev/null +++ b/drivers/crypto/qce/sha.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _SHA_H_ | ||
15 | #define _SHA_H_ | ||
16 | |||
17 | #include <crypto/scatterwalk.h> | ||
18 | #include <crypto/sha.h> | ||
19 | |||
20 | #include "common.h" | ||
21 | #include "core.h" | ||
22 | |||
23 | #define QCE_SHA_MAX_BLOCKSIZE SHA256_BLOCK_SIZE | ||
24 | #define QCE_SHA_MAX_DIGESTSIZE SHA256_DIGEST_SIZE | ||
25 | |||
26 | struct qce_sha_ctx { | ||
27 | u8 authkey[QCE_SHA_MAX_BLOCKSIZE]; | ||
28 | }; | ||
29 | |||
30 | /** | ||
31 | * struct qce_sha_reqctx - holds private ahash objects per request | ||
32 | * @buf: used during update, import and export | ||
33 | * @tmpbuf: buffer for internal use | ||
34 | * @digest: calculated digest buffer | ||
35 | * @buflen: length of the buffer | ||
36 | * @flags: operation flags | ||
37 | * @src_orig: original request sg list | ||
38 | * @nbytes_orig: original request number of bytes | ||
39 | * @src_chained: is source scatterlist chained | ||
40 | * @src_nents: source number of entries | ||
41 | * @byte_count: byte count | ||
42 | * @count: byte count saved in the hash state across update, import and export | ||
43 | * @first_blk: is it the first block | ||
44 | * @last_blk: is it the last block | ||
45 | * @sg: used to chain sg lists | ||
46 | * @authkey: pointer to auth key in sha ctx | ||
47 | * @authklen: auth key length | ||
48 | * @result_sg: scatterlist used for result buffer | ||
49 | */ | ||
50 | struct qce_sha_reqctx { | ||
51 | u8 buf[QCE_SHA_MAX_BLOCKSIZE]; | ||
52 | u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE]; | ||
53 | u8 digest[QCE_SHA_MAX_DIGESTSIZE]; | ||
54 | unsigned int buflen; | ||
55 | unsigned long flags; | ||
56 | struct scatterlist *src_orig; | ||
57 | unsigned int nbytes_orig; | ||
58 | bool src_chained; | ||
59 | int src_nents; | ||
60 | __be32 byte_count[2]; | ||
61 | u64 count; | ||
62 | bool first_blk; | ||
63 | bool last_blk; | ||
64 | struct scatterlist sg[2]; | ||
65 | u8 *authkey; | ||
66 | unsigned int authklen; | ||
67 | struct scatterlist result_sg; | ||
68 | }; | ||
69 | |||
70 | static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm) | ||
71 | { | ||
72 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
73 | struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash), | ||
74 | struct ahash_alg, halg); | ||
75 | |||
76 | return container_of(alg, struct qce_alg_template, alg.ahash); | ||
77 | } | ||
78 | |||
79 | extern const struct qce_algo_ops ahash_ops; | ||
80 | |||
81 | #endif /* _SHA_H_ */ | ||
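to_ahash_tmpl() recovers the enclosing qce_alg_template from a bare crypto_tfm by chaining two container_of() steps through the embedded ahash_alg. The pointer arithmetic can be exercised standalone; this userspace sketch (simplified stand-in structs, not the real crypto types) shows the round trip:

    #include <stdio.h>
    #include <stddef.h>

    /* simplified container_of, equivalent to the kernel's for this use */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct halg  { int digestsize; };
    struct ahash { struct halg halg; };
    struct tmpl  { int flags; struct ahash alg; };

    int main(void)
    {
            struct tmpl t = { .flags = 42 };
            struct halg *h = &t.alg.halg;   /* what the core hands a driver */
            struct ahash *a = container_of(h, struct ahash, halg);
            struct tmpl *back = container_of(a, struct tmpl, alg);

            printf("flags = %d\n", back->flags);    /* prints 42 */
            return 0;
    }

The cast is only safe because qce_ahash_register_one() guarantees that every registered ahash_alg really is embedded in a qce_alg_template.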
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index a999f537228f..92105f3dc8e0 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx) | |||
190 | static irqreturn_t cryp_interrupt_handler(int irq, void *param) | 190 | static irqreturn_t cryp_interrupt_handler(int irq, void *param) |
191 | { | 191 | { |
192 | struct cryp_ctx *ctx; | 192 | struct cryp_ctx *ctx; |
193 | int i; | 193 | int count; |
194 | struct cryp_device_data *device_data; | 194 | struct cryp_device_data *device_data; |
195 | 195 | ||
196 | if (param == NULL) { | 196 | if (param == NULL) { |
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) | |||
215 | if (cryp_pending_irq_src(device_data, | 215 | if (cryp_pending_irq_src(device_data, |
216 | CRYP_IRQ_SRC_OUTPUT_FIFO)) { | 216 | CRYP_IRQ_SRC_OUTPUT_FIFO)) { |
217 | if (ctx->outlen / ctx->blocksize > 0) { | 217 | if (ctx->outlen / ctx->blocksize > 0) { |
218 | for (i = 0; i < ctx->blocksize / 4; i++) { | 218 | count = ctx->blocksize / 4; |
219 | *(ctx->outdata) = readl_relaxed( | 219 | |
220 | &device_data->base->dout); | 220 | readsl(&device_data->base->dout, ctx->outdata, count); |
221 | ctx->outdata += 4; | 221 | ctx->outdata += count; |
222 | ctx->outlen -= 4; | 222 | ctx->outlen -= count; |
223 | } | ||
224 | 223 | ||
225 | if (ctx->outlen == 0) { | 224 | if (ctx->outlen == 0) { |
226 | cryp_disable_irq_src(device_data, | 225 | cryp_disable_irq_src(device_data, |
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) | |||
230 | } else if (cryp_pending_irq_src(device_data, | 229 | } else if (cryp_pending_irq_src(device_data, |
231 | CRYP_IRQ_SRC_INPUT_FIFO)) { | 230 | CRYP_IRQ_SRC_INPUT_FIFO)) { |
232 | if (ctx->datalen / ctx->blocksize > 0) { | 231 | if (ctx->datalen / ctx->blocksize > 0) { |
233 | for (i = 0 ; i < ctx->blocksize / 4; i++) { | 232 | count = ctx->blocksize / 4; |
234 | writel_relaxed(ctx->indata, | 233 | |
235 | &device_data->base->din); | 234 | writesl(&device_data->base->din, ctx->indata, count); |
236 | ctx->indata += 4; | 235 | |
237 | ctx->datalen -= 4; | 236 | ctx->indata += count; |
238 | } | 237 | ctx->datalen -= count; |
239 | 238 | ||
240 | if (ctx->datalen == 0) | 239 | if (ctx->datalen == 0) |
241 | cryp_disable_irq_src(device_data, | 240 | cryp_disable_irq_src(device_data, |
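readsl()/writesl() transfer count 32-bit words between a memory buffer and a single, non-incrementing MMIO address — exactly what the removed loops did one readl_relaxed()/writel_relaxed() at a time. A sketch of the semantics only (the real per-arch helpers use raw accessors and make no byte-order adjustment):

    /* readsl(addr, buffer, count): drain count words from one FIFO register */
    static void readsl_sketch(const volatile void __iomem *addr,
                              void *buffer, unsigned int count)
    {
            u32 *buf = buffer;

            while (count--)
                    *buf++ = __raw_readl(addr);     /* address never advances */
    }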
diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 0edf949f6369..94b19be67574 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h | |||
@@ -75,9 +75,9 @@ static inline void aead_givcrypt_free(struct aead_givcrypt_request *req) | |||
75 | 75 | ||
76 | static inline void aead_givcrypt_set_callback( | 76 | static inline void aead_givcrypt_set_callback( |
77 | struct aead_givcrypt_request *req, u32 flags, | 77 | struct aead_givcrypt_request *req, u32 flags, |
78 | crypto_completion_t complete, void *data) | 78 | crypto_completion_t compl, void *data) |
79 | { | 79 | { |
80 | aead_request_set_callback(&req->areq, flags, complete, data); | 80 | aead_request_set_callback(&req->areq, flags, compl, data); |
81 | } | 81 | } |
82 | 82 | ||
83 | static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req, | 83 | static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req, |
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 016c2f110f63..623a59c1ff5a 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h | |||
@@ -410,4 +410,10 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size) | |||
410 | return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; | 410 | return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; |
411 | } | 411 | } |
412 | 412 | ||
413 | static inline void crypto_yield(u32 flags) | ||
414 | { | ||
415 | if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) | ||
416 | cond_resched(); | ||
417 | } | ||
418 | |||
413 | #endif /* _CRYPTO_ALGAPI_H */ | 419 | #endif /* _CRYPTO_ALGAPI_H */ |
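Moving crypto_yield() into algapi.h makes it available to all algorithm implementations, not just scatterwalk users. A typical call site is a software cipher chewing through a large request, sketched here with a hypothetical encrypt_one_block() helper:

    /* Sketch: yield between blocks when the request may sleep. */
    static int ecb_encrypt_sketch(struct blkcipher_desc *desc, u8 *dst,
                                  const u8 *src, unsigned int nbytes,
                                  unsigned int bsize)
    {
            while (nbytes >= bsize) {
                    encrypt_one_block(desc, dst, src);  /* hypothetical */
                    src += bsize;
                    dst += bsize;
                    nbytes -= bsize;
                    crypto_yield(desc->flags);  /* cond_resched() if MAY_SLEEP */
            }
            return nbytes ? -EINVAL : 0;
    }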
diff --git a/include/crypto/des.h b/include/crypto/des.h index 2971c6304ade..fc6274c6bb26 100644 --- a/include/crypto/des.h +++ b/include/crypto/des.h | |||
@@ -16,4 +16,7 @@ | |||
16 | 16 | ||
17 | extern unsigned long des_ekey(u32 *pe, const u8 *k); | 17 | extern unsigned long des_ekey(u32 *pe, const u8 *k); |
18 | 18 | ||
19 | extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, | ||
20 | unsigned int keylen); | ||
21 | |||
19 | #endif /* __CRYPTO_DES_H */ | 22 | #endif /* __CRYPTO_DES_H */ |
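Exporting __des3_ede_setkey() lets hardware drivers reuse the generic 3DES key expansion instead of duplicating it. A hedged sketch of a driver setkey path — hw3des_setkey_sketch() and the key-programming step are illustrative; DES3_EDE_EXPKEY_WORDS and DES3_EDE_KEY_SIZE are existing constants from this header:

    static int hw3des_setkey_sketch(const u8 *key, u32 *tfm_flags)
    {
            u32 expkey[DES3_EDE_EXPKEY_WORDS];
            int ret;

            ret = __des3_ede_setkey(expkey, tfm_flags, key, DES3_EDE_KEY_SIZE);
            if (ret)
                    return ret;     /* generic code may flag degenerate keys */

            /* program expkey into the engine's key registers ... */
            return 0;
    }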
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h new file mode 100644 index 000000000000..831d786976c5 --- /dev/null +++ b/include/crypto/drbg.h | |||
@@ -0,0 +1,290 @@ | |||
1 | /* | ||
2 | * DRBG based on NIST SP800-90A | ||
3 | * | ||
4 | * Copyright Stephan Mueller <smueller@chronox.de>, 2014 | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions | ||
8 | * are met: | ||
9 | * 1. Redistributions of source code must retain the above copyright | ||
10 | * notice, and the entire permission notice in its entirety, | ||
11 | * including the disclaimer of warranties. | ||
12 | * 2. Redistributions in binary form must reproduce the above copyright | ||
13 | * notice, this list of conditions and the following disclaimer in the | ||
14 | * documentation and/or other materials provided with the distribution. | ||
15 | * 3. The name of the author may not be used to endorse or promote | ||
16 | * products derived from this software without specific prior | ||
17 | * written permission. | ||
18 | * | ||
19 | * ALTERNATIVELY, this product may be distributed under the terms of | ||
20 | * the GNU General Public License, in which case the provisions of the GPL are | ||
21 | * required INSTEAD OF the above restrictions. (This clause is | ||
22 | * necessary due to a potential bad interaction between the GPL and | ||
23 | * the restrictions contained in a BSD-style copyright.) | ||
24 | * | ||
25 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
26 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | ||
27 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF | ||
28 | * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE | ||
29 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT | ||
31 | * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
32 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
33 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
34 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
35 | * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH | ||
36 | * DAMAGE. | ||
37 | */ | ||
38 | |||
39 | #ifndef _DRBG_H | ||
40 | #define _DRBG_H | ||
41 | |||
42 | |||
43 | #include <linux/random.h> | ||
44 | #include <linux/scatterlist.h> | ||
45 | #include <crypto/hash.h> | ||
46 | #include <linux/module.h> | ||
47 | #include <linux/crypto.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <crypto/internal/rng.h> | ||
50 | #include <crypto/rng.h> | ||
51 | #include <linux/fips.h> | ||
52 | #include <linux/spinlock.h> | ||
53 | #include <linux/list.h> | ||
54 | |||
55 | /* | ||
56 | * Concatenation Helper and string operation helper | ||
57 | * | ||
58 | * SP800-90A requires the concatenation of different data. To avoid copying | ||
59 | * buffers around or allocating additional memory, the following data structure | ||
60 | * is used to point to the original memory with its size. In addition, it | ||
61 | * is used to build a linked list. The linked list defines the concatenation | ||
62 | * of individual buffers. The order of the memory blocks referenced in that | ||
63 | * linked list determines the order of concatenation. | ||
64 | */ | ||
65 | struct drbg_string { | ||
66 | const unsigned char *buf; | ||
67 | size_t len; | ||
68 | struct list_head list; | ||
69 | }; | ||
70 | |||
71 | static inline void drbg_string_fill(struct drbg_string *string, | ||
72 | const unsigned char *buf, size_t len) | ||
73 | { | ||
74 | string->buf = buf; | ||
75 | string->len = len; | ||
76 | INIT_LIST_HEAD(&string->list); | ||
77 | } | ||
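For example, seed material that SP800-90A defines as the concatenation entropy || nonce || pers can be described without a single copy by chaining three entries (buffer names are illustrative):

    struct drbg_string entropy, nonce, pers;
    LIST_HEAD(seedlist);

    drbg_string_fill(&entropy, entropy_buf, entropy_len);
    drbg_string_fill(&nonce, nonce_buf, nonce_len);
    drbg_string_fill(&pers, pers_buf, pers_len);

    list_add_tail(&entropy.list, &seedlist);
    list_add_tail(&nonce.list, &seedlist);
    list_add_tail(&pers.list, &seedlist);

    /* a consumer walks &seedlist and hashes/encrypts each buffer in
     * order, which is exactly the concatenation entropy || nonce || pers */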
78 | |||
79 | struct drbg_state; | ||
80 | typedef uint32_t drbg_flag_t; | ||
81 | |||
82 | struct drbg_core { | ||
83 | drbg_flag_t flags; /* flags for the cipher */ | ||
84 | __u8 statelen; /* maximum state length */ | ||
85 | /* | ||
86 | * maximum length of personalization string or additional input | ||
87 | * string -- exponent for base 2 | ||
88 | */ | ||
89 | __u8 max_addtllen; | ||
90 | /* maximum bits per RNG request -- exponent for base 2 */ | ||
91 | __u8 max_bits; | ||
92 | /* maximum number of requests -- exponent for base 2 */ | ||
93 | __u8 max_req; | ||
94 | __u8 blocklen_bytes; /* block size of output in bytes */ | ||
95 | char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ | ||
96 | /* kernel crypto API backend cipher name */ | ||
97 | char backend_cra_name[CRYPTO_MAX_ALG_NAME]; | ||
98 | }; | ||
99 | |||
100 | struct drbg_state_ops { | ||
101 | int (*update)(struct drbg_state *drbg, struct list_head *seed, | ||
102 | int reseed); | ||
103 | int (*generate)(struct drbg_state *drbg, | ||
104 | unsigned char *buf, unsigned int buflen, | ||
105 | struct list_head *addtl); | ||
106 | int (*crypto_init)(struct drbg_state *drbg); | ||
107 | int (*crypto_fini)(struct drbg_state *drbg); | ||
108 | |||
109 | }; | ||
110 | |||
111 | struct drbg_test_data { | ||
112 | struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */ | ||
113 | }; | ||
114 | |||
115 | struct drbg_state { | ||
116 | spinlock_t drbg_lock; /* lock around DRBG */ | ||
117 | unsigned char *V; /* internal state 10.1.1.1 1a) */ | ||
118 | /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ | ||
119 | unsigned char *C; | ||
120 | /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ | ||
121 | size_t reseed_ctr; | ||
122 | /* some memory the DRBG can use for its operation */ | ||
123 | unsigned char *scratchpad; | ||
124 | void *priv_data; /* Cipher handle */ | ||
125 | bool seeded; /* DRBG fully seeded? */ | ||
126 | bool pr; /* Prediction resistance enabled? */ | ||
127 | #ifdef CONFIG_CRYPTO_FIPS | ||
128 | bool fips_primed; /* Continuous test primed? */ | ||
129 | unsigned char *prev; /* FIPS 140-2 continuous test value */ | ||
130 | #endif | ||
131 | const struct drbg_state_ops *d_ops; | ||
132 | const struct drbg_core *core; | ||
133 | struct drbg_test_data *test_data; | ||
134 | }; | ||
135 | |||
136 | static inline __u8 drbg_statelen(struct drbg_state *drbg) | ||
137 | { | ||
138 | if (drbg && drbg->core) | ||
139 | return drbg->core->statelen; | ||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static inline __u8 drbg_blocklen(struct drbg_state *drbg) | ||
144 | { | ||
145 | if (drbg && drbg->core) | ||
146 | return drbg->core->blocklen_bytes; | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static inline __u8 drbg_keylen(struct drbg_state *drbg) | ||
151 | { | ||
152 | if (drbg && drbg->core) | ||
153 | return (drbg->core->statelen - drbg->core->blocklen_bytes); | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) | ||
158 | { | ||
159 | /* max_bits is in bits, but buflen is in bytes */ | ||
160 | return (1 << (drbg->core->max_bits - 3)); | ||
161 | } | ||
162 | |||
163 | static inline size_t drbg_max_addtl(struct drbg_state *drbg) | ||
164 | { | ||
165 | return (1UL<<(drbg->core->max_addtllen)); | ||
166 | } | ||
167 | |||
168 | static inline size_t drbg_max_requests(struct drbg_state *drbg) | ||
169 | { | ||
170 | return (1UL<<(drbg->core->max_req)); | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * kernel crypto API input data structure for DRBG generate in case dlen | ||
175 | * is set to 0 | ||
176 | */ | ||
177 | struct drbg_gen { | ||
178 | unsigned char *outbuf; /* output buffer for random numbers */ | ||
179 | unsigned int outlen; /* size of output buffer */ | ||
180 | struct drbg_string *addtl; /* additional information string */ | ||
181 | struct drbg_test_data *test_data; /* test data */ | ||
182 | }; | ||
183 | |||
184 | /* | ||
185 | * This is a wrapper to the kernel crypto API function of | ||
186 | * crypto_rng_get_bytes() to allow the caller to provide additional data. | ||
187 | * | ||
188 | * @drng DRBG handle -- see crypto_rng_get_bytes | ||
189 | * @outbuf output buffer -- see crypto_rng_get_bytes | ||
190 | * @outlen length of output buffer -- see crypto_rng_get_bytes | ||
191 | * @addtl additional information string input buffer | ||
192 | *        (a struct drbg_string, as filled by drbg_string_fill) | ||
193 | * | ||
194 | * return | ||
195 | * see crypto_rng_get_bytes | ||
196 | */ | ||
197 | static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, | ||
198 | unsigned char *outbuf, unsigned int outlen, | ||
199 | struct drbg_string *addtl) | ||
200 | { | ||
201 | int ret; | ||
202 | struct drbg_gen genbuf; | ||
203 | genbuf.outbuf = outbuf; | ||
204 | genbuf.outlen = outlen; | ||
205 | genbuf.addtl = addtl; | ||
206 | genbuf.test_data = NULL; | ||
207 | ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0); | ||
208 | return ret; | ||
209 | } | ||
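A caller obtains a DRBG through the normal rng API and hands the additional input over as a drbg_string; a minimal sketch, assuming the instance name drbg_nopr_hmac_sha256 and with error handling trimmed:

    struct crypto_rng *drng;
    struct drbg_string addtl;
    u8 out[64];
    int ret;

    drng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
    if (IS_ERR(drng))
            return PTR_ERR(drng);

    drbg_string_fill(&addtl, addtl_buf, addtl_len);     /* caller data */
    ret = crypto_drbg_get_bytes_addtl(drng, out, sizeof(out), &addtl);

    crypto_free_rng(drng);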
210 | |||
211 | /* | ||
212 | * TEST code | ||
213 | * | ||
214 | * This is a wrapper to the kernel crypto API function of | ||
215 | * crypto_rng_get_bytes() to allow the caller to provide additional data and | ||
216 | * allow furnishing of test_data | ||
217 | * | ||
218 | * @drng DRBG handle -- see crypto_rng_get_bytes | ||
219 | * @outbuf output buffer -- see crypto_rng_get_bytes | ||
220 | * @outlen length of output buffer -- see crypto_rng_get_bytes | ||
221 | * @addtl additional information string input buffer | ||
222 | *        (a struct drbg_string, as filled by drbg_string_fill) | ||
223 | * @test_data filled test data | ||
224 | * | ||
225 | * return | ||
226 | * see crypto_rng_get_bytes | ||
227 | */ | ||
228 | static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, | ||
229 | unsigned char *outbuf, unsigned int outlen, | ||
230 | struct drbg_string *addtl, | ||
231 | struct drbg_test_data *test_data) | ||
232 | { | ||
233 | int ret; | ||
234 | struct drbg_gen genbuf; | ||
235 | genbuf.outbuf = outbuf; | ||
236 | genbuf.outlen = outlen; | ||
237 | genbuf.addtl = addtl; | ||
238 | genbuf.test_data = test_data; | ||
239 | ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0); | ||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * TEST code | ||
245 | * | ||
246 | * This is a wrapper to the kernel crypto API function of | ||
247 | * crypto_rng_reset() to allow the caller to provide test_data | ||
248 | * | ||
249 | * @drng DRBG handle -- see crypto_rng_reset | ||
250 | * @pers personalization string input buffer | ||
251 | *       (a struct drbg_string, as filled by drbg_string_fill) | ||
252 | * @test_data filled test data | ||
253 | * | ||
254 | * return | ||
255 | * see crypto_rng_reset | ||
256 | */ | ||
257 | static inline int crypto_drbg_reset_test(struct crypto_rng *drng, | ||
258 | struct drbg_string *pers, | ||
259 | struct drbg_test_data *test_data) | ||
260 | { | ||
261 | int ret; | ||
262 | struct drbg_gen genbuf; | ||
263 | genbuf.outbuf = NULL; | ||
264 | genbuf.outlen = 0; | ||
265 | genbuf.addtl = pers; | ||
266 | genbuf.test_data = test_data; | ||
267 | ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0); | ||
268 | return ret; | ||
269 | } | ||
270 | |||
271 | /* DRBG type flags */ | ||
272 | #define DRBG_CTR ((drbg_flag_t)1<<0) | ||
273 | #define DRBG_HMAC ((drbg_flag_t)1<<1) | ||
274 | #define DRBG_HASH ((drbg_flag_t)1<<2) | ||
275 | #define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH) | ||
276 | /* DRBG strength flags */ | ||
277 | #define DRBG_STRENGTH128 ((drbg_flag_t)1<<3) | ||
278 | #define DRBG_STRENGTH192 ((drbg_flag_t)1<<4) | ||
279 | #define DRBG_STRENGTH256 ((drbg_flag_t)1<<5) | ||
280 | #define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \ | ||
281 | DRBG_STRENGTH256) | ||
282 | |||
283 | enum drbg_prefixes { | ||
284 | DRBG_PREFIX0 = 0x00, | ||
285 | DRBG_PREFIX1, | ||
286 | DRBG_PREFIX2, | ||
287 | DRBG_PREFIX3 | ||
288 | }; | ||
289 | |||
290 | #endif /* _DRBG_H */ | ||
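Each drbg_core entry is expected to carry exactly one type bit and one strength bit, which the two masks make cheap to decode. A sketch of a validity check built on them (drbg_flags_valid() is a hypothetical helper):

    static bool drbg_flags_valid(drbg_flag_t flags)
    {
            drbg_flag_t type = flags & DRBG_TYPE_MASK;
            drbg_flag_t strength = flags & DRBG_STRENGTH_MASK;

            /* exactly one bit set in each field: x && !(x & (x - 1)) */
            return type && !(type & (type - 1)) &&
                   strength && !(strength & (strength - 1));
    }

    /* e.g. an AES-256 CTR core would carry DRBG_CTR | DRBG_STRENGTH256 */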
diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 26cb1eb16f4c..a39195539601 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h | |||
@@ -238,10 +238,10 @@ static inline struct ahash_request *ahash_request_cast( | |||
238 | 238 | ||
239 | static inline void ahash_request_set_callback(struct ahash_request *req, | 239 | static inline void ahash_request_set_callback(struct ahash_request *req, |
240 | u32 flags, | 240 | u32 flags, |
241 | crypto_completion_t complete, | 241 | crypto_completion_t compl, |
242 | void *data) | 242 | void *data) |
243 | { | 243 | { |
244 | req->base.complete = complete; | 244 | req->base.complete = compl; |
245 | req->base.data = data; | 245 | req->base.data = data; |
246 | req->base.flags = flags; | 246 | req->base.flags = flags; |
247 | } | 247 | } |
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 06e8b32d541c..b3a46c515d1b 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h | |||
@@ -81,8 +81,7 @@ static inline int skcipher_enqueue_givcrypt( | |||
81 | static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( | 81 | static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( |
82 | struct crypto_queue *queue) | 82 | struct crypto_queue *queue) |
83 | { | 83 | { |
84 | return __crypto_dequeue_request( | 84 | return skcipher_givcrypt_cast(crypto_dequeue_request(queue)); |
85 | queue, offsetof(struct skcipher_givcrypt_request, creq.base)); | ||
86 | } | 85 | } |
87 | 86 | ||
88 | static inline void *skcipher_givcrypt_reqctx( | 87 | static inline void *skcipher_givcrypt_reqctx( |
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 6a626a507b8c..7ef512f8631c 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h | |||
@@ -25,12 +25,6 @@ | |||
25 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | 27 | ||
28 | static inline void crypto_yield(u32 flags) | ||
29 | { | ||
30 | if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) | ||
31 | cond_resched(); | ||
32 | } | ||
33 | |||
34 | static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num, | 28 | static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num, |
35 | struct scatterlist *sg2) | 29 | struct scatterlist *sg2) |
36 | { | 30 | { |
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 25fd6126522d..07d245f073d1 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h | |||
@@ -86,9 +86,9 @@ static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req) | |||
86 | 86 | ||
87 | static inline void skcipher_givcrypt_set_callback( | 87 | static inline void skcipher_givcrypt_set_callback( |
88 | struct skcipher_givcrypt_request *req, u32 flags, | 88 | struct skcipher_givcrypt_request *req, u32 flags, |
89 | crypto_completion_t complete, void *data) | 89 | crypto_completion_t compl, void *data) |
90 | { | 90 | { |
91 | ablkcipher_request_set_callback(&req->creq, flags, complete, data); | 91 | ablkcipher_request_set_callback(&req->creq, flags, compl, data); |
92 | } | 92 | } |
93 | 93 | ||
94 | static inline void skcipher_givcrypt_set_crypt( | 94 | static inline void skcipher_givcrypt_set_crypt( |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b92eadf92d72..d45e949699ea 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -710,9 +710,9 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req) | |||
710 | 710 | ||
711 | static inline void ablkcipher_request_set_callback( | 711 | static inline void ablkcipher_request_set_callback( |
712 | struct ablkcipher_request *req, | 712 | struct ablkcipher_request *req, |
713 | u32 flags, crypto_completion_t complete, void *data) | 713 | u32 flags, crypto_completion_t compl, void *data) |
714 | { | 714 | { |
715 | req->base.complete = complete; | 715 | req->base.complete = compl; |
716 | req->base.data = data; | 716 | req->base.data = data; |
717 | req->base.flags = flags; | 717 | req->base.flags = flags; |
718 | } | 718 | } |
@@ -841,10 +841,10 @@ static inline void aead_request_free(struct aead_request *req) | |||
841 | 841 | ||
842 | static inline void aead_request_set_callback(struct aead_request *req, | 842 | static inline void aead_request_set_callback(struct aead_request *req, |
843 | u32 flags, | 843 | u32 flags, |
844 | crypto_completion_t complete, | 844 | crypto_completion_t compl, |
845 | void *data) | 845 | void *data) |
846 | { | 846 | { |
847 | req->base.complete = complete; | 847 | req->base.complete = compl; |
848 | req->base.data = data; | 848 | req->base.data = data; |
849 | req->base.flags = flags; | 849 | req->base.flags = flags; |
850 | } | 850 | } |
diff --git a/kernel/module.c b/kernel/module.c index 81e727cf6df9..ae79ce615cb9 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -60,7 +60,6 @@ | |||
60 | #include <linux/jump_label.h> | 60 | #include <linux/jump_label.h> |
61 | #include <linux/pfn.h> | 61 | #include <linux/pfn.h> |
62 | #include <linux/bsearch.h> | 62 | #include <linux/bsearch.h> |
63 | #include <linux/fips.h> | ||
64 | #include <uapi/linux/module.h> | 63 | #include <uapi/linux/module.h> |
65 | #include "module-internal.h" | 64 | #include "module-internal.h" |
66 | 65 | ||
@@ -2448,9 +2447,6 @@ static int module_sig_check(struct load_info *info) | |||
2448 | } | 2447 | } |
2449 | 2448 | ||
2450 | /* Not having a signature is only an error if we're strict. */ | 2449 | /* Not having a signature is only an error if we're strict. */ |
2451 | if (err < 0 && fips_enabled) | ||
2452 | panic("Module verification failed with error %d in FIPS mode\n", | ||
2453 | err); | ||
2454 | if (err == -ENOKEY && !sig_enforce) | 2450 | if (err == -ENOKEY && !sig_enforce) |
2455 | err = 0; | 2451 | err = 0; |
2456 | 2452 | ||