author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-13 16:33:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-13 16:33:26 -0500
commit	e3aa91a7cb21a595169b20c64f63ca39a91a0c43 (patch)
tree	6a92a2e595629949a45336c770c2408abba8444d
parent	78a45c6f067824cf5d0a9fedea7339ac2e28603c (diff)
parent	8606813a6c8997fd3bb805186056d78670eb86ca (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 - The crypto API is now documented :)
 - Disallow arbitrary module loading through crypto API.
 - Allow get request with empty driver name through crypto_user.
 - Allow speed testing of arbitrary hash functions.
 - Add caam support for ctr(aes), gcm(aes) and their derivatives.
 - nx now supports concurrent hashing properly.
 - Add sahara support for SHA1/256.
 - Add ARM64 version of CRC32.
 - Misc fixes.

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (77 commits)
  crypto: tcrypt - Allow speed testing of arbitrary hash functions
  crypto: af_alg - add user space interface for AEAD
  crypto: qat - fix problem with coalescing enable logic
  crypto: sahara - add support for SHA1/256
  crypto: sahara - replace tasklets with kthread
  crypto: sahara - add support for i.MX53
  crypto: sahara - fix spinlock initialization
  crypto: arm - replace memset by memzero_explicit
  crypto: powerpc - replace memset by memzero_explicit
  crypto: sha - replace memset by memzero_explicit
  crypto: sparc - replace memset by memzero_explicit
  crypto: algif_skcipher - initialize upon init request
  crypto: algif_skcipher - removed unneeded code
  crypto: algif_skcipher - Fixed blocking recvmsg
  crypto: drbg - use memzero_explicit() for clearing sensitive data
  crypto: drbg - use MODULE_ALIAS_CRYPTO
  crypto: include crypto- module prefix in template
  crypto: user - add MODULE_ALIAS
  crypto: sha-mb - remove a bogus NULL check
  crytpo: qat - Fix 64 bytes requests
  ...
-rw-r--r--  Documentation/DocBook/Makefile | 2
-rw-r--r--  Documentation/DocBook/crypto-API.tmpl | 1253
-rw-r--r--  Documentation/crypto/crypto-API-userspace.txt | 205
-rw-r--r--  Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/hwrng/atmel-trng.txt | 16
-rw-r--r--  arch/arm/boot/dts/at91sam9g45.dtsi | 7
-rw-r--r--  arch/arm/crypto/aes_glue.c | 4
-rw-r--r--  arch/arm/crypto/sha1_glue.c | 2
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c | 2
-rw-r--r--  arch/arm/crypto/sha512_neon_glue.c | 6
-rw-r--r--  arch/arm64/crypto/Kconfig | 4
-rw-r--r--  arch/arm64/crypto/Makefile | 4
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c | 2
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 8
-rw-r--r--  arch/arm64/crypto/crc32-arm64.c | 274
-rw-r--r--  arch/powerpc/crypto/sha1.c | 4
-rw-r--r--  arch/s390/crypto/aes_s390.c | 2
-rw-r--r--  arch/s390/crypto/des_s390.c | 4
-rw-r--r--  arch/s390/crypto/ghash_s390.c | 2
-rw-r--r--  arch/s390/crypto/sha1_s390.c | 2
-rw-r--r--  arch/s390/crypto/sha256_s390.c | 4
-rw-r--r--  arch/s390/crypto/sha512_s390.c | 4
-rw-r--r--  arch/sparc/crypto/aes_glue.c | 2
-rw-r--r--  arch/sparc/crypto/camellia_glue.c | 2
-rw-r--r--  arch/sparc/crypto/crc32c_glue.c | 2
-rw-r--r--  arch/sparc/crypto/des_glue.c | 2
-rw-r--r--  arch/sparc/crypto/md5_glue.c | 2
-rw-r--r--  arch/sparc/crypto/sha1_glue.c | 2
-rw-r--r--  arch/sparc/crypto/sha256_glue.c | 6
-rw-r--r--  arch/sparc/crypto/sha512_glue.c | 6
-rw-r--r--  arch/x86/crypto/aes_glue.c | 4
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 10
-rw-r--r--  arch/x86/crypto/blowfish_glue.c | 4
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx2_glue.c | 4
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx_glue.c | 4
-rw-r--r--  arch/x86/crypto/camellia_glue.c | 4
-rw-r--r--  arch/x86/crypto/cast5_avx_glue.c | 2
-rw-r--r--  arch/x86/crypto/cast6_avx_glue.c | 2
-rw-r--r--  arch/x86/crypto/crc32-pclmul_glue.c | 4
-rw-r--r--  arch/x86/crypto/crc32c-intel_glue.c | 4
-rw-r--r--  arch/x86/crypto/crct10dif-pclmul_glue.c | 4
-rw-r--r--  arch/x86/crypto/des3_ede_glue.c | 8
-rw-r--r--  arch/x86/crypto/fpu.c | 3
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_glue.c | 2
-rw-r--r--  arch/x86/crypto/salsa20_glue.c | 4
-rw-r--r--  arch/x86/crypto/serpent_avx2_glue.c | 4
-rw-r--r--  arch/x86/crypto/serpent_avx_glue.c | 2
-rw-r--r--  arch/x86/crypto/serpent_sse2_glue.c | 2
-rw-r--r--  arch/x86/crypto/sha-mb/sha1_mb.c | 3
-rw-r--r--  arch/x86/crypto/sha1_ssse3_glue.c | 2
-rw-r--r--  arch/x86/crypto/sha256_ssse3_glue.c | 6
-rw-r--r--  arch/x86/crypto/sha512_ssse3_glue.c | 6
-rw-r--r--  arch/x86/crypto/twofish_avx_glue.c | 2
-rw-r--r--  arch/x86/crypto/twofish_glue.c | 4
-rw-r--r--  arch/x86/crypto/twofish_glue_3way.c | 4
-rw-r--r--  crypto/842.c | 1
-rw-r--r--  crypto/aes_generic.c | 2
-rw-r--r--  crypto/af_alg.c | 6
-rw-r--r--  crypto/algapi.c | 4
-rw-r--r--  crypto/algif_hash.c | 4
-rw-r--r--  crypto/algif_skcipher.c | 15
-rw-r--r--  crypto/ansi_cprng.c | 2
-rw-r--r--  crypto/anubis.c | 1
-rw-r--r--  crypto/api.c | 4
-rw-r--r--  crypto/arc4.c | 1
-rw-r--r--  crypto/authenc.c | 1
-rw-r--r--  crypto/authencesn.c | 1
-rw-r--r--  crypto/blowfish_generic.c | 2
-rw-r--r--  crypto/camellia_generic.c | 2
-rw-r--r--  crypto/cast5_generic.c | 2
-rw-r--r--  crypto/cast6_generic.c | 2
-rw-r--r--  crypto/cbc.c | 1
-rw-r--r--  crypto/ccm.c | 5
-rw-r--r--  crypto/chainiv.c | 1
-rw-r--r--  crypto/cmac.c | 1
-rw-r--r--  crypto/crc32.c | 1
-rw-r--r--  crypto/crc32c_generic.c | 2
-rw-r--r--  crypto/crct10dif_generic.c | 2
-rw-r--r--  crypto/cryptd.c | 1
-rw-r--r--  crypto/crypto_null.c | 6
-rw-r--r--  crypto/crypto_user.c | 6
-rw-r--r--  crypto/ctr.c | 3
-rw-r--r--  crypto/cts.c | 1
-rw-r--r--  crypto/deflate.c | 2
-rw-r--r--  crypto/des_generic.c | 2
-rw-r--r--  crypto/drbg.c | 127
-rw-r--r--  crypto/ecb.c | 1
-rw-r--r--  crypto/eseqiv.c | 1
-rw-r--r--  crypto/fcrypt.c | 1
-rw-r--r--  crypto/gcm.c | 7
-rw-r--r--  crypto/ghash-generic.c | 2
-rw-r--r--  crypto/hmac.c | 1
-rw-r--r--  crypto/khazad.c | 1
-rw-r--r--  crypto/krng.c | 2
-rw-r--r--  crypto/lrw.c | 1
-rw-r--r--  crypto/lz4.c | 1
-rw-r--r--  crypto/lz4hc.c | 1
-rw-r--r--  crypto/lzo.c | 1
-rw-r--r--  crypto/mcryptd.c | 1
-rw-r--r--  crypto/md4.c | 2
-rw-r--r--  crypto/md5.c | 1
-rw-r--r--  crypto/michael_mic.c | 1
-rw-r--r--  crypto/pcbc.c | 1
-rw-r--r--  crypto/pcrypt.c | 1
-rw-r--r--  crypto/rmd128.c | 1
-rw-r--r--  crypto/rmd160.c | 1
-rw-r--r--  crypto/rmd256.c | 1
-rw-r--r--  crypto/rmd320.c | 1
-rw-r--r--  crypto/salsa20_generic.c | 2
-rw-r--r--  crypto/seed.c | 1
-rw-r--r--  crypto/seqiv.c | 1
-rw-r--r--  crypto/serpent_generic.c | 4
-rw-r--r--  crypto/sha1_generic.c | 2
-rw-r--r--  crypto/sha256_generic.c | 4
-rw-r--r--  crypto/sha512_generic.c | 4
-rw-r--r--  crypto/tcrypt.c | 32
-rw-r--r--  crypto/tea.c | 4
-rw-r--r--  crypto/testmgr.c | 3
-rw-r--r--  crypto/tgr192.c | 4
-rw-r--r--  crypto/twofish_generic.c | 2
-rw-r--r--  crypto/vmac.c | 1
-rw-r--r--  crypto/wp512.c | 4
-rw-r--r--  crypto/xcbc.c | 1
-rw-r--r--  crypto/xts.c | 1
-rw-r--r--  crypto/zlib.c | 1
-rw-r--r--  drivers/char/hw_random/Kconfig | 2
-rw-r--r--  drivers/char/hw_random/atmel-rng.c | 15
-rw-r--r--  drivers/char/hw_random/core.c | 12
-rw-r--r--  drivers/crypto/bfin_crc.c | 2
-rw-r--r--  drivers/crypto/caam/caamalg.c | 1904
-rw-r--r--  drivers/crypto/caam/compat.h | 1
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 2
-rw-r--r--  drivers/crypto/caam/error.c | 25
-rw-r--r--  drivers/crypto/caam/jr.c | 3
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c | 12
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 61
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c | 13
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c | 12
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 66
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c | 81
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 208
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 222
-rw-r--r--  drivers/crypto/nx/nx.c | 127
-rw-r--r--  drivers/crypto/nx/nx.h | 8
-rw-r--r--  drivers/crypto/padlock-aes.c | 2
-rw-r--r--  drivers/crypto/padlock-sha.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 3
-rw-r--r--  drivers/crypto/qat/qat_common/adf_dev_mgr.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 15
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_access_macros.h | 9
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 37
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 3
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 2
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 12
-rw-r--r--  drivers/crypto/sahara.c | 794
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 6
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 10
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 3
-rw-r--r--  include/crypto/hash.h | 492
-rw-r--r--  include/crypto/if_alg.h | 1
-rw-r--r--  include/crypto/rng.h | 80
-rw-r--r--  include/linux/crypto.h | 1112
-rw-r--r--  include/net/sock.h | 1
-rw-r--r--  include/uapi/linux/if_alg.h | 2
-rw-r--r--  net/core/sock.c | 24
167 files changed, 6836 insertions, 814 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index bec06659e0eb..9c7d92d03f62 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -15,7 +15,7 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
 15	    80211.xml debugobjects.xml sh.xml regulator.xml \
 16	    alsa-driver-api.xml writing-an-alsa-driver.xml \
 17	    tracepoint.xml drm.xml media_api.xml w1.xml \
 18	-   writing_musb_glue_layer.xml
 18	+   writing_musb_glue_layer.xml crypto-API.xml
 19
 20	include Documentation/DocBook/media/Makefile
 21
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
new file mode 100644
index 000000000000..c763d30f4893
--- /dev/null
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -0,0 +1,1253 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
4
5<book id="KernelCryptoAPI">
6 <bookinfo>
7 <title>Linux Kernel Crypto API</title>
8
9 <authorgroup>
10 <author>
11 <firstname>Stephan</firstname>
12 <surname>Mueller</surname>
13 <affiliation>
14 <address>
15 <email>smueller@chronox.de</email>
16 </address>
17 </affiliation>
18 </author>
19 <author>
20 <firstname>Marek</firstname>
21 <surname>Vasut</surname>
22 <affiliation>
23 <address>
24 <email>marek@denx.de</email>
25 </address>
26 </affiliation>
27 </author>
28 </authorgroup>
29
30 <copyright>
31 <year>2014</year>
32 <holder>Stephan Mueller</holder>
33 </copyright>
34
35
36 <legalnotice>
37 <para>
38 This documentation is free software; you can redistribute
39 it and/or modify it under the terms of the GNU General Public
40 License as published by the Free Software Foundation; either
41 version 2 of the License, or (at your option) any later
42 version.
43 </para>
44
45 <para>
46 This program is distributed in the hope that it will be
47 useful, but WITHOUT ANY WARRANTY; without even the implied
48 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
49 See the GNU General Public License for more details.
50 </para>
51
52 <para>
53 You should have received a copy of the GNU General Public
54 License along with this program; if not, write to the Free
55 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
56 MA 02111-1307 USA
57 </para>
58
59 <para>
60 For more details see the file COPYING in the source
61 distribution of Linux.
62 </para>
63 </legalnotice>
64 </bookinfo>
65
66 <toc></toc>
67
68 <chapter id="Intro">
69 <title>Kernel Crypto API Interface Specification</title>
70
71 <sect1><title>Introduction</title>
72
73 <para>
74 The kernel crypto API offers a rich set of cryptographic ciphers as
75 well as other data transformation mechanisms and methods to invoke
76 these. This document contains a description of the API and provides
77 example code.
78 </para>
79
80 <para>
81 To understand and properly use the kernel crypto API a brief
82 explanation of its structure is given. Based on the architecture,
83 the API can be separated into different components. Following the
84 architecture specification, hints to developers of ciphers are
85 provided. Pointers to the API function call documentation are
86 given at the end.
87 </para>
88
89 <para>
90 The kernel crypto API refers to all algorithms as "transformations".
91 Therefore, a cipher handle variable usually has the name "tfm".
92 Besides cryptographic operations, the kernel crypto API also knows
93 compression transformations and handles them the same way as ciphers.
94 </para>
95
96 <para>
97 The kernel crypto API serves the following entity types:
98
99 <itemizedlist>
100 <listitem>
101 <para>consumers requesting cryptographic services</para>
102 </listitem>
103 <listitem>
104 <para>data transformation implementations (typically ciphers)
105 that can be called by consumers using the kernel crypto
106 API</para>
107 </listitem>
108 </itemizedlist>
109 </para>
110
111 <para>
112 This specification is intended for consumers of the kernel crypto
113 API as well as for developers implementing ciphers. This API
114 specification, however, does not discuss all API calls available
115 to data transformation implementations (i.e. implementations of
116 ciphers and other transformations, such as CRC or even compression
117 algorithms, that can register with the kernel crypto API).
118 </para>
119
120 <para>
121 Note: The terms "transformation" and "cipher algorithm" are used
122 interchangeably.
123 </para>
124 </sect1>
125
126 <sect1><title>Terminology</title>
127 <para>
128 The transformation implementation is an actual code or interface
129 to hardware which implements a certain transformation with precisely
130 defined behavior.
131 </para>
132
133 <para>
134 The transformation object (TFM) is an instance of a transformation
135 implementation. There can be multiple transformation objects
136 associated with a single transformation implementation. Each of
137 those transformation objects is held by a crypto API consumer or
138 another transformation. A transformation object is allocated when a
139 crypto API consumer requests a transformation implementation.
140 The consumer is then provided with a structure that contains
141 a transformation object (TFM).
142 </para>
143
144 <para>
145 The structure that contains transformation objects may also be
146 referred to as a "cipher handle". Such a cipher handle is always
147 subject to the following phases that are reflected in the API calls
148 applicable to such a cipher handle:
149 </para>
150
151 <orderedlist>
152 <listitem>
153 <para>Initialization of a cipher handle.</para>
154 </listitem>
155 <listitem>
156 <para>Execution of all intended cipher operations applicable
157 for the handle where the cipher handle must be furnished to
158 every API call.</para>
159 </listitem>
160 <listitem>
161 <para>Destruction of a cipher handle.</para>
162 </listitem>
163 </orderedlist>
164
165 <para>
166 When using the initialization API calls, a cipher handle is
167 created and returned to the consumer. Therefore, please refer
168 to the initialization API call for the data structure type the
169 consumer is expected to receive and subsequently use. The
170 initialization API calls all follow the naming convention
171 crypto_alloc_*.
172 </para>
173
174 <para>
175 The transformation context is private data associated with
176 the transformation object.
177 </para>
178 </sect1>
179 </chapter>
180
181 <chapter id="Architecture"><title>Kernel Crypto API Architecture</title>
182 <sect1><title>Cipher algorithm types</title>
183 <para>
184 The kernel crypto API provides different API calls for the
185 following cipher types:
186
187 <itemizedlist>
188 <listitem><para>Symmetric ciphers</para></listitem>
189 <listitem><para>AEAD ciphers</para></listitem>
190 <listitem><para>Message digest, including keyed message digest</para></listitem>
191 <listitem><para>Random number generation</para></listitem>
192 <listitem><para>User space interface</para></listitem>
193 </itemizedlist>
194 </para>
195 </sect1>
196
197 <sect1><title>Ciphers And Templates</title>
198 <para>
199 The kernel crypto API provides implementations of single block
200 ciphers and message digests. In addition, the kernel crypto API
201 provides numerous "templates" that can be used in conjunction
202 with the single block ciphers and message digests. Templates
203 include all types of block chaining mode, the HMAC mechanism, etc.
204 </para>
205
206 <para>
207 Single block ciphers and message digests can either be directly
208 used by a caller or invoked together with a template to form
209 multi-block ciphers or keyed message digests.
210 </para>
211
212 <para>
213 A single block cipher may even be called with multiple templates.
214 However, templates cannot be used without a single cipher.
215 </para>
216
217 <para>
218 See /proc/crypto and search for "name". For example:
219
220 <itemizedlist>
221 <listitem><para>aes</para></listitem>
222 <listitem><para>ecb(aes)</para></listitem>
223 <listitem><para>cmac(aes)</para></listitem>
224 <listitem><para>ccm(aes)</para></listitem>
225 <listitem><para>rfc4106(gcm(aes))</para></listitem>
226 <listitem><para>sha1</para></listitem>
227 <listitem><para>hmac(sha1)</para></listitem>
228 <listitem><para>authenc(hmac(sha1),cbc(aes))</para></listitem>
229 </itemizedlist>
230 </para>
231
232 <para>
233 In these examples, "aes" and "sha1" are the ciphers and all
234 others are the templates.
235 </para>
236 </sect1>
237
238 <sect1><title>Synchronous And Asynchronous Operation</title>
239 <para>
240 The kernel crypto API provides synchronous and asynchronous
241 API operations.
242 </para>
243
244 <para>
245 When using the synchronous API operation, the caller invokes
246 a cipher operation which is performed synchronously by the
247 kernel crypto API. That means, the caller waits until the
248 cipher operation completes. Therefore, the kernel crypto API
249 calls work like regular function calls. For synchronous
250 operation, the set of API calls is small and conceptually
251 similar to any other crypto library.
252 </para>
253
254 <para>
255 Asynchronous operation is provided by the kernel crypto API
256 which implies that the invocation of a cipher operation will
257 complete almost instantly. That invocation triggers the
258 cipher operation but it does not signal its completion. Before
259 invoking a cipher operation, the caller must provide a callback
260 function the kernel crypto API can invoke to signal the
261 completion of the cipher operation. Furthermore, the caller
262 must ensure it can handle such asynchronous events by applying
263 appropriate locking around its data. The kernel crypto API
264 does not perform any special serialization operation to protect
265 the caller's data integrity.
266 </para>
267 </sect1>
268
269 <sect1><title>Crypto API Cipher References And Priority</title>
270 <para>
271 A cipher is referenced by the caller with a string. That string
272 has the following semantics:
273
274 <programlisting>
275 template(single block cipher)
276 </programlisting>
277
278 where "template" and "single block cipher" are the aforementioned
279 template and single block cipher, respectively. If applicable,
280 additional templates may enclose other templates, such as
281
282 <programlisting>
283 template1(template2(single block cipher))
284 </programlisting>
285 </para>
286
287 <para>
288 The kernel crypto API may provide multiple implementations of a
289 template or a single block cipher. For example, AES on newer
290 Intel hardware has the following implementations: AES-NI,
291 assembler implementation, or straight C. Now, when using the
292 string "aes" with the kernel crypto API, which cipher
293 implementation is used? The answer to that question is the
294 priority number assigned to each cipher implementation by the
295 kernel crypto API. When a caller uses the string to refer to a
296 cipher during initialization of a cipher handle, the kernel
297 crypto API looks up all implementations providing an
298 implementation with that name and selects the implementation
299 with the highest priority.
300 </para>
301
302 <para>
303 Now, a caller may have the need to refer to a specific cipher
304 implementation and thus does not want to rely on the
305 priority-based selection. To accommodate this scenario, the
306 kernel crypto API allows the cipher implementation to register
307 a unique name in addition to common names. When using that
308 unique name, a caller is therefore always sure to refer to
309 the intended cipher implementation.
310 </para>
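
    <para>
      As a minimal sketch of the two naming schemes (assuming the driver
      name "aes-aesni" is present on the running system; driver names
      differ between machines, see /proc/crypto):
    </para>

    <programlisting>
 struct crypto_cipher *tfm;

 /* generic name: the highest-priority "aes" implementation is selected */
 tfm = crypto_alloc_cipher("aes", 0, 0);
 if (!IS_ERR(tfm))
         crypto_free_cipher(tfm);

 /* driver name: pins one specific implementation (example name) */
 tfm = crypto_alloc_cipher("aes-aesni", 0, 0);
 if (!IS_ERR(tfm))
         crypto_free_cipher(tfm);
    </programlisting>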
311
312 <para>
313 The list of available ciphers is given in /proc/crypto. However,
314 that list does not specify all possible permutations of
315 templates and ciphers. Each block listed in /proc/crypto may
316 contain the following information -- if one of the components
317 listed below is not applicable to a cipher, it is not
318 displayed:
319 </para>
320
321 <itemizedlist>
322 <listitem>
323 <para>name: the generic name of the cipher that is subject
324 to the priority-based selection -- this name can be used by
325 the cipher allocation API calls (all names listed above are
326 examples for such generic names)</para>
327 </listitem>
328 <listitem>
329 <para>driver: the unique name of the cipher -- this name can
330 be used by the cipher allocation API calls</para>
331 </listitem>
332 <listitem>
333 <para>module: the kernel module providing the cipher
334 implementation (or "kernel" for statically linked ciphers)</para>
335 </listitem>
336 <listitem>
337 <para>priority: the priority value of the cipher implementation</para>
338 </listitem>
339 <listitem>
340 <para>refcnt: the reference count of the respective cipher
341 (i.e. the number of current consumers of this cipher)</para>
342 </listitem>
343 <listitem>
344 <para>selftest: specification whether the self test for the
345 cipher passed</para>
346 </listitem>
347 <listitem>
348 <para>type:
349 <itemizedlist>
350 <listitem>
351 <para>blkcipher for synchronous block ciphers</para>
352 </listitem>
353 <listitem>
354 <para>ablkcipher for asynchronous block ciphers</para>
355 </listitem>
356 <listitem>
357 <para>cipher for single block ciphers that may be used with
358 an additional template</para>
359 </listitem>
360 <listitem>
361 <para>shash for synchronous message digest</para>
362 </listitem>
363 <listitem>
364 <para>ahash for asynchronous message digest</para>
365 </listitem>
366 <listitem>
367 <para>aead for AEAD cipher type</para>
368 </listitem>
369 <listitem>
370 <para>compression for compression type transformations</para>
371 </listitem>
372 <listitem>
373 <para>rng for random number generator</para>
374 </listitem>
375 <listitem>
376 <para>givcipher for cipher with associated IV generator
377 (see the geniv entry below for the specification of the
378 IV generator type used by the cipher implementation)</para>
379 </listitem>
380 </itemizedlist>
381 </para>
382 </listitem>
383 <listitem>
384 <para>blocksize: blocksize of cipher in bytes</para>
385 </listitem>
386 <listitem>
387 <para>keysize: key size in bytes</para>
388 </listitem>
389 <listitem>
390 <para>ivsize: IV size in bytes</para>
391 </listitem>
392 <listitem>
393 <para>seedsize: required size of seed data for random number
394 generator</para>
395 </listitem>
396 <listitem>
397 <para>digestsize: output size of the message digest</para>
398 </listitem>
399 <listitem>
400 <para>geniv: IV generation type:
401 <itemizedlist>
402 <listitem>
403 <para>eseqiv for encrypted sequence number based IV
404 generation</para>
405 </listitem>
406 <listitem>
407 <para>seqiv for sequence number based IV generation</para>
408 </listitem>
409 <listitem>
410 <para>chainiv for chain iv generation</para>
411 </listitem>
412 <listitem>
413 <para>&lt;builtin&gt; is a marker that the cipher implements
414 IV generation and handling as it is specific to the given
415 cipher</para>
416 </listitem>
417 </itemizedlist>
418 </para>
419 </listitem>
420 </itemizedlist>
421 </sect1>
422
423 <sect1><title>Key Sizes</title>
424 <para>
425 When allocating a cipher handle, the caller only specifies the
426 cipher type. Symmetric ciphers, however, typically support
427 multiple key sizes (e.g. AES-128 vs. AES-192 vs. AES-256).
428 These key sizes are determined with the length of the provided
429 key. Thus, the kernel crypto API does not provide a separate
430 way to select the particular symmetric cipher key size.
431 </para>
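
    <para>
      As a minimal sketch (assuming the generic "aes" cipher is
      available), the key size is implied purely by the length passed
      to the setkey call; 16 bytes selects AES-128, 32 bytes AES-256:
    </para>

    <programlisting>
 u8 key[32];
 struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);

 if (!IS_ERR(tfm)) {
         get_random_bytes(key, sizeof(key));
         /* a 32 byte key length selects AES-256 */
         if (crypto_cipher_setkey(tfm, key, sizeof(key)))
                 pr_info("key could not be set\n");
         crypto_free_cipher(tfm);
 }
    </programlisting>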
432 </sect1>
433
434 <sect1><title>Cipher Allocation Type And Masks</title>
435 <para>
436 The different cipher handle allocation functions allow the
437 specification of a type and mask flag. Both parameters have
438 the following meaning (and are therefore not covered in the
439 subsequent sections).
440 </para>
441
442 <para>
443 The type flag specifies the type of the cipher algorithm.
444 The caller usually provides a 0 when the caller wants the
445 default handling. Otherwise, the caller may provide the
446 following selections which match the aforementioned
447 cipher types:
448 </para>
449
450 <itemizedlist>
451 <listitem>
452 <para>CRYPTO_ALG_TYPE_CIPHER Single block cipher</para>
453 </listitem>
454 <listitem>
455 <para>CRYPTO_ALG_TYPE_COMPRESS Compression</para>
456 </listitem>
457 <listitem>
458 <para>CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with
459 Associated Data (MAC)</para>
460 </listitem>
461 <listitem>
462 <para>CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher</para>
463 </listitem>
464 <listitem>
465 <para>CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher</para>
466 </listitem>
467 <listitem>
468 <para>CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block
469 cipher packed together with an IV generator (see geniv field
470 in the /proc/crypto listing for the known IV generators)</para>
471 </listitem>
472 <listitem>
473 <para>CRYPTO_ALG_TYPE_DIGEST Raw message digest</para>
474 </listitem>
475 <listitem>
476 <para>CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST</para>
477 </listitem>
478 <listitem>
479 <para>CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash</para>
480 </listitem>
481 <listitem>
482 <para>CRYPTO_ALG_TYPE_AHASH Asynchronous multi-block hash</para>
483 </listitem>
484 <listitem>
485 <para>CRYPTO_ALG_TYPE_RNG Random Number Generation</para>
486 </listitem>
487 <listitem>
488 <para>CRYPTO_ALG_TYPE_PCOMPRESS Enhanced version of
489 CRYPTO_ALG_TYPE_COMPRESS allowing for segmented compression /
490 decompression instead of performing the operation on one
491 segment only. CRYPTO_ALG_TYPE_PCOMPRESS is intended to replace
492 CRYPTO_ALG_TYPE_COMPRESS once existing consumers are converted.</para>
493 </listitem>
494 </itemizedlist>
495
496 <para>
497 The mask flag restricts the type of cipher. The only allowed
498 flag is CRYPTO_ALG_ASYNC to restrict the cipher lookup function
499 to asynchronous ciphers. Usually, a caller provides a 0 for the
500 mask flag.
501 </para>
502
503 <para>
504 When the caller provides a mask and type specification, the
505 caller limits the search the kernel crypto API can perform for
506 a suitable cipher implementation for the given cipher name.
507 That means that even when a caller uses a cipher name that exists
508 during its initialization call, the kernel crypto API may not
509 select it because of the specified type and mask fields.
510 </para>
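
    <para>
      As an illustrative sketch of how the two parameters are passed
      (the exact matching semantics are implemented by the crypto
      core), in-kernel users of the synchronous block cipher API of
      this era typically pass a type of 0 and a mask of
      CRYPTO_ALG_ASYNC:
    </para>

    <programlisting>
 /* type = 0, mask = CRYPTO_ALG_ASYNC: constrain the lookup by the ASYNC bit */
 struct crypto_blkcipher *tfm =
         crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);

 if (IS_ERR(tfm))
         pr_info("no matching cbc(aes) implementation found\n");
 else
         crypto_free_blkcipher(tfm);
    </programlisting>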
511 </sect1>
512 </chapter>
513
514 <chapter id="Development"><title>Developing Cipher Algorithms</title>
515 <sect1><title>Registering And Unregistering Transformation</title>
516 <para>
517 There are three distinct types of registration functions in
518 the Crypto API. One is used to register a generic cryptographic
519 transformation, while the other two are specific to HASH
520 transformations and COMPRESSion. We will discuss the latter
521 two in a separate chapter, here we will only look at the
522 generic ones.
523 </para>
524
525 <para>
526 Before discussing the register functions, the data structure
527 to be filled with each, struct crypto_alg, must be considered
528 -- see below for a description of this data structure.
529 </para>
530
531 <para>
532 The generic registration functions can be found in
533 include/linux/crypto.h and their definition can be seen below.
534 The former function registers a single transformation, while
535 the latter works on an array of transformation descriptions.
536 The latter is useful when registering transformations in bulk.
537 </para>
538
539 <programlisting>
540 int crypto_register_alg(struct crypto_alg *alg);
541 int crypto_register_algs(struct crypto_alg *algs, int count);
542 </programlisting>
543
544 <para>
545 The counterparts to those functions are listed below.
546 </para>
547
548 <programlisting>
549 int crypto_unregister_alg(struct crypto_alg *alg);
550 int crypto_unregister_algs(struct crypto_alg *algs, int count);
551 </programlisting>
552
553 <para>
554 Notice that both registration and unregistration functions
555 do return a value, so make sure to handle errors. A return
556 code of zero implies success. Any return code &lt; 0 implies
557 an error.
558 </para>
559
560 <para>
561 The bulk registration / unregistration functions require
562 that an array of 'count' struct crypto_alg entries is passed. These
563 functions simply loop over that array and register /
564 unregister each individual algorithm. If an error occurs,
565 the loop is terminated at the offending algorithm definition.
566 That means, the algorithms prior to the offending algorithm
567 are successfully registered. Note, the caller has no way of
568 knowing which cipher implementations have successfully
569 registered. If this is important to know, the caller should
570 loop through the different implementations using the single
571 instance *_alg functions for each individual implementation.
572 </para>
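
    <para>
      A minimal registration skeleton might look like the following
      sketch; the array contents are placeholders and a real driver
      must fill in the fields and callbacks described in the following
      sections:
    </para>

    <programlisting>
 static struct crypto_alg my_algs[2];    /* placeholder definitions */

 static int __init my_crypto_init(void)
 {
         /* returns 0 on success, a negative error code otherwise */
         return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
 }

 static void __exit my_crypto_exit(void)
 {
         crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
 }

 module_init(my_crypto_init);
 module_exit(my_crypto_exit);
 MODULE_LICENSE("GPL");
    </programlisting>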
573 </sect1>
574
575 <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
576 <para>
577 Example of transformations: aes, arc4, ...
578 </para>
579
580 <para>
581 This section describes the simplest of all transformation
582 implementations, that being the CIPHER type used for symmetric
583 ciphers. The CIPHER type is used for transformations which
584 operate on exactly one block at a time and there are no
585 dependencies between blocks at all.
586 </para>
587
588 <sect2><title>Registration specifics</title>
589 <para>
590 The registration of a [CIPHER] algorithm is specific in that the
591 struct crypto_alg field .cra_type is left empty. The .cra_u.cipher
592 has to be filled in with proper callbacks to implement this
593 transformation.
594 </para>
595
596 <para>
597 See struct cipher_alg below.
598 </para>
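
    <para>
      For orientation, a [CIPHER] registration might be laid out as in
      the following sketch. All "my_cipher" names are hypothetical;
      the callback prototypes are those expected by struct cipher_alg:
    </para>

    <programlisting>
 struct my_cipher_ctx {
         u8 key[32];
         unsigned int keylen;
 };

 static int my_cipher_setkey(struct crypto_tfm *tfm, const u8 *key,
                             unsigned int keylen);
 static void my_cipher_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 static void my_cipher_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);

 static struct crypto_alg my_cipher_alg = {
         .cra_name        = "mycipher",
         .cra_driver_name = "mycipher-generic",
         .cra_priority    = 100,
         .cra_flags       = CRYPTO_ALG_TYPE_CIPHER,
         .cra_blocksize   = 16,
         .cra_ctxsize     = sizeof(struct my_cipher_ctx),
         .cra_module      = THIS_MODULE,
         .cra_u           = {
                 .cipher = {
                         .cia_min_keysize = 16,
                         .cia_max_keysize = 32,
                         .cia_setkey      = my_cipher_setkey,
                         .cia_encrypt     = my_cipher_encrypt,
                         .cia_decrypt     = my_cipher_decrypt,
                 }
         }
 };
    </programlisting>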
599 </sect2>
600
601 <sect2><title>Cipher Definition With struct cipher_alg</title>
602 <para>
603 Struct cipher_alg defines a single block cipher.
604 </para>
605
606 <para>
607 Here are schematics of how these functions are called when
608 operated from other parts of the kernel. Note that the
609 .cia_setkey() call might happen before or after any of these
610 schematics, but must not happen while any of them
611 are in flight.
612 </para>
613
614 <para>
615 <programlisting>
616 KEY ---. PLAINTEXT ---.
617 v v
618 .cia_setkey() -&gt; .cia_encrypt()
619 |
620 '-----&gt; CIPHERTEXT
621 </programlisting>
622 </para>
623
624 <para>
625 Please note that a pattern where .cia_setkey() is called
626 multiple times is also valid:
627 </para>
628
629 <para>
630 <programlisting>
631
632 KEY1 --. PLAINTEXT1 --. KEY2 --. PLAINTEXT2 --.
633 v v v v
634 .cia_setkey() -&gt; .cia_encrypt() -&gt; .cia_setkey() -&gt; .cia_encrypt()
635 | |
636 '---&gt; CIPHERTEXT1 '---&gt; CIPHERTEXT2
637 </programlisting>
638 </para>
639
640 </sect2>
641 </sect1>
642
643 <sect1><title>Multi-Block Ciphers [BLKCIPHER] [ABLKCIPHER]</title>
644 <para>
645 Example of transformations: cbc(aes), ecb(arc4), ...
646 </para>
647
648 <para>
649 This section describes the multi-block cipher transformation
650 implementations for both synchronous [BLKCIPHER] and
651 asynchronous [ABLKCIPHER] case. The multi-block ciphers are
652 used for transformations which operate on scatterlists of
653 data supplied to the transformation functions. They output
654 the result into a scatterlist of data as well.
655 </para>
656
657 <sect2><title>Registration Specifics</title>
658
659 <para>
660 The registration of [BLKCIPHER] or [ABLKCIPHER] algorithms
661 is one of the most standard procedures throughout the crypto API.
662 </para>
663
664 <para>
665 Note, if a cipher implementation requires a proper alignment
666 of data, the caller should use crypto_blkcipher_alignmask() or
667 crypto_ablkcipher_alignmask(), respectively, to identify the
668 required memory alignment mask. The kernel
669 crypto API is able to process requests that are unaligned.
670 This implies, however, additional overhead as the kernel
671 crypto API needs to perform the realignment of the data which
672 may imply moving of data.
673 </para>
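
    <para>
      As a small sketch, a caller could check whether a buffer already
      satisfies the mask (and thus avoids the realignment overhead)
      like this:
    </para>

    <programlisting>
 static bool buf_is_aligned(struct crypto_blkcipher *tfm, const void *buf)
 {
         unsigned long mask = crypto_blkcipher_alignmask(tfm);

         /* aligned when the low bits covered by the mask are all zero */
         return ((unsigned long)buf &amp; mask) == 0;
 }
    </programlisting>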
674 </sect2>
675
676 <sect2><title>Cipher Definition With struct blkcipher_alg and ablkcipher_alg</title>
677 <para>
678 Struct blkcipher_alg defines a synchronous block cipher whereas
679 struct ablkcipher_alg defines an asynchronous block cipher.
680 </para>
681
682 <para>
683 Please refer to the single block cipher description for schematics
684 of the block cipher usage. The usage patterns are exactly the same
685 for [ABLKCIPHER] and [BLKCIPHER] as they are for plain [CIPHER].
686 </para>
687 </sect2>
688
689 <sect2><title>Specifics Of Asynchronous Multi-Block Cipher</title>
690 <para>
691 There are a couple of specifics to the [ABLKCIPHER] interface.
692 </para>
693
694 <para>
695 First of all, some of the drivers will want to use the
696 Generic ScatterWalk in case the hardware needs to be fed
697 separate chunks of the scatterlist which contains the
698 plaintext and will contain the ciphertext. Please refer
699 to the ScatterWalk interface offered by the Linux kernel
700 scatter / gather list implementation.
701 </para>
702 </sect2>
703 </sect1>
704
705 <sect1><title>Hashing [HASH]</title>
706
707 <para>
708 Example of transformations: crc32, md5, sha1, sha256,...
709 </para>
710
711 <sect2><title>Registering And Unregistering The Transformation</title>
712
713 <para>
714 There are multiple ways to register a HASH transformation,
715 depending on whether the transformation is synchronous [SHASH]
716 or asynchronous [AHASH] and the amount of HASH transformations
717 we are registering. You can find the prototypes defined in
718 include/crypto/internal/hash.h:
719 </para>
720
721 <programlisting>
722 int crypto_register_ahash(struct ahash_alg *alg);
723
724 int crypto_register_shash(struct shash_alg *alg);
725 int crypto_register_shashes(struct shash_alg *algs, int count);
726 </programlisting>
727
728 <para>
729 The respective counterparts for unregistering the HASH
730 transformation are as follows:
731 </para>
732
733 <programlisting>
734 int crypto_unregister_ahash(struct ahash_alg *alg);
735
736 int crypto_unregister_shash(struct shash_alg *alg);
737 int crypto_unregister_shashes(struct shash_alg *algs, int count);
738 </programlisting>
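
    <para>
      For orientation, a synchronous hash registration might be laid
      out as in the following sketch. All "my_hash" names are
      hypothetical; the operation prototypes are those expected by
      struct shash_alg:
    </para>

    <programlisting>
 struct my_hash_state {
         u32 digest[5];
         u64 count;
 };

 static int my_hash_init(struct shash_desc *desc);
 static int my_hash_update(struct shash_desc *desc, const u8 *data,
                           unsigned int len);
 static int my_hash_final(struct shash_desc *desc, u8 *out);

 static struct shash_alg my_hash_alg = {
         .digestsize = 20,
         .init       = my_hash_init,
         .update     = my_hash_update,
         .final      = my_hash_final,
         .descsize   = sizeof(struct my_hash_state),
         .base       = {
                 .cra_name        = "myhash",
                 .cra_driver_name = "myhash-generic",
                 .cra_blocksize   = 64,
                 .cra_module      = THIS_MODULE,
         }
 };

 /* registered with crypto_register_shash(&amp;my_hash_alg) and removed
  * again with crypto_unregister_shash(&amp;my_hash_alg) */
    </programlisting>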
739 </sect2>
740
741 <sect2><title>Cipher Definition With struct shash_alg and ahash_alg</title>
742 <para>
743 Here are schematics of how these functions are called when
744 operated from other parts of the kernel. Note that the .setkey()
745 call might happen before or after any of these schematics,
746 but must not happen while any of them are in flight. Please note
747 that calling .init() followed immediately by .final() is also a
748 perfectly valid transformation.
749 </para>
750
751 <programlisting>
752 I) DATA -----------.
753 v
754 .init() -&gt; .update() -&gt; .final() ! .update() might not be called
755 ^ | | at all in this scenario.
756 '----' '---&gt; HASH
757
758 II) DATA -----------.-----------.
759 v v
760 .init() -&gt; .update() -&gt; .finup() ! .update() may not be called
761 ^ | | at all in this scenario.
762 '----' '---&gt; HASH
763
764 III) DATA -----------.
765 v
766 .digest() ! The entire process is handled
767 | by the .digest() call.
768 '---------------&gt; HASH
769 </programlisting>
770
771 <para>
772 Here is a schematic of how the .export()/.import() functions are
773 called when used from another part of the kernel.
774 </para>
775
776 <programlisting>
777 KEY--. DATA--.
778 v v ! .update() may not be called
779 .setkey() -&gt; .init() -&gt; .update() -&gt; .export() at all in this scenario.
780 ^ | |
781 '-----' '--&gt; PARTIAL_HASH
782
783 ----------- other transformations happen here -----------
784
785 PARTIAL_HASH--. DATA1--.
786 v v
787 .import -&gt; .update() -&gt; .final() ! .update() may not be called
788 ^ | | at all in this scenario.
789 '----' '--&gt; HASH1
790
791 PARTIAL_HASH--. DATA2-.
792 v v
793 .import -&gt; .finup()
794 |
795 '---------------&gt; HASH2
796 </programlisting>
797 </sect2>
798
799 <sect2><title>Specifics Of Asynchronous HASH Transformation</title>
800 <para>
801 Some of the drivers will want to use the Generic ScatterWalk
802 in case the implementation needs to be fed separate chunks of the
803 scatterlist which contains the input data. The buffer containing
804 the resulting hash will always be properly aligned to
805 .cra_alignmask so there is no need to worry about this.
806 </para>
807 </sect2>
808 </sect1>
809 </chapter>
810
811 <chapter id="API"><title>Programming Interface</title>
812 <sect1><title>Block Cipher Context Data Structures</title>
813!Pinclude/linux/crypto.h Block Cipher Context Data Structures
814!Finclude/linux/crypto.h aead_request
815 </sect1>
816 <sect1><title>Block Cipher Algorithm Definitions</title>
817!Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
818!Finclude/linux/crypto.h crypto_alg
819!Finclude/linux/crypto.h ablkcipher_alg
820!Finclude/linux/crypto.h aead_alg
821!Finclude/linux/crypto.h blkcipher_alg
822!Finclude/linux/crypto.h cipher_alg
823!Finclude/linux/crypto.h rng_alg
824 </sect1>
825 <sect1><title>Asynchronous Block Cipher API</title>
826!Pinclude/linux/crypto.h Asynchronous Block Cipher API
827!Finclude/linux/crypto.h crypto_alloc_ablkcipher
828!Finclude/linux/crypto.h crypto_free_ablkcipher
829!Finclude/linux/crypto.h crypto_has_ablkcipher
830!Finclude/linux/crypto.h crypto_ablkcipher_ivsize
831!Finclude/linux/crypto.h crypto_ablkcipher_blocksize
832!Finclude/linux/crypto.h crypto_ablkcipher_setkey
833!Finclude/linux/crypto.h crypto_ablkcipher_reqtfm
834!Finclude/linux/crypto.h crypto_ablkcipher_encrypt
835!Finclude/linux/crypto.h crypto_ablkcipher_decrypt
836 </sect1>
837 <sect1><title>Asynchronous Cipher Request Handle</title>
838!Pinclude/linux/crypto.h Asynchronous Cipher Request Handle
839!Finclude/linux/crypto.h crypto_ablkcipher_reqsize
840!Finclude/linux/crypto.h ablkcipher_request_set_tfm
841!Finclude/linux/crypto.h ablkcipher_request_alloc
842!Finclude/linux/crypto.h ablkcipher_request_free
843!Finclude/linux/crypto.h ablkcipher_request_set_callback
844!Finclude/linux/crypto.h ablkcipher_request_set_crypt
845 </sect1>
846 <sect1><title>Authenticated Encryption With Associated Data (AEAD) Cipher API</title>
847!Pinclude/linux/crypto.h Authenticated Encryption With Associated Data (AEAD) Cipher API
848!Finclude/linux/crypto.h crypto_alloc_aead
849!Finclude/linux/crypto.h crypto_free_aead
850!Finclude/linux/crypto.h crypto_aead_ivsize
851!Finclude/linux/crypto.h crypto_aead_authsize
852!Finclude/linux/crypto.h crypto_aead_blocksize
853!Finclude/linux/crypto.h crypto_aead_setkey
854!Finclude/linux/crypto.h crypto_aead_setauthsize
855!Finclude/linux/crypto.h crypto_aead_encrypt
856!Finclude/linux/crypto.h crypto_aead_decrypt
857 </sect1>
858 <sect1><title>Asynchronous AEAD Request Handle</title>
859!Pinclude/linux/crypto.h Asynchronous AEAD Request Handle
860!Finclude/linux/crypto.h crypto_aead_reqsize
861!Finclude/linux/crypto.h aead_request_set_tfm
862!Finclude/linux/crypto.h aead_request_alloc
863!Finclude/linux/crypto.h aead_request_free
864!Finclude/linux/crypto.h aead_request_set_callback
865!Finclude/linux/crypto.h aead_request_set_crypt
866!Finclude/linux/crypto.h aead_request_set_assoc
867 </sect1>
868 <sect1><title>Synchronous Block Cipher API</title>
869!Pinclude/linux/crypto.h Synchronous Block Cipher API
870!Finclude/linux/crypto.h crypto_alloc_blkcipher
871!Finclude/linux/crypto.h crypto_free_blkcipher
872!Finclude/linux/crypto.h crypto_has_blkcipher
873!Finclude/linux/crypto.h crypto_blkcipher_name
874!Finclude/linux/crypto.h crypto_blkcipher_ivsize
875!Finclude/linux/crypto.h crypto_blkcipher_blocksize
876!Finclude/linux/crypto.h crypto_blkcipher_setkey
877!Finclude/linux/crypto.h crypto_blkcipher_encrypt
878!Finclude/linux/crypto.h crypto_blkcipher_encrypt_iv
879!Finclude/linux/crypto.h crypto_blkcipher_decrypt
880!Finclude/linux/crypto.h crypto_blkcipher_decrypt_iv
881!Finclude/linux/crypto.h crypto_blkcipher_set_iv
882!Finclude/linux/crypto.h crypto_blkcipher_get_iv
883 </sect1>
884 <sect1><title>Single Block Cipher API</title>
885!Pinclude/linux/crypto.h Single Block Cipher API
886!Finclude/linux/crypto.h crypto_alloc_cipher
887!Finclude/linux/crypto.h crypto_free_cipher
888!Finclude/linux/crypto.h crypto_has_cipher
889!Finclude/linux/crypto.h crypto_cipher_blocksize
890!Finclude/linux/crypto.h crypto_cipher_setkey
891!Finclude/linux/crypto.h crypto_cipher_encrypt_one
892!Finclude/linux/crypto.h crypto_cipher_decrypt_one
893 </sect1>
894 <sect1><title>Synchronous Message Digest API</title>
895!Pinclude/linux/crypto.h Synchronous Message Digest API
896!Finclude/linux/crypto.h crypto_alloc_hash
897!Finclude/linux/crypto.h crypto_free_hash
898!Finclude/linux/crypto.h crypto_has_hash
899!Finclude/linux/crypto.h crypto_hash_blocksize
900!Finclude/linux/crypto.h crypto_hash_digestsize
901!Finclude/linux/crypto.h crypto_hash_init
902!Finclude/linux/crypto.h crypto_hash_update
903!Finclude/linux/crypto.h crypto_hash_final
904!Finclude/linux/crypto.h crypto_hash_digest
905!Finclude/linux/crypto.h crypto_hash_setkey
906 </sect1>
907 <sect1><title>Message Digest Algorithm Definitions</title>
908!Pinclude/crypto/hash.h Message Digest Algorithm Definitions
909!Finclude/crypto/hash.h hash_alg_common
910!Finclude/crypto/hash.h ahash_alg
911!Finclude/crypto/hash.h shash_alg
912 </sect1>
913 <sect1><title>Asynchronous Message Digest API</title>
914!Pinclude/crypto/hash.h Asynchronous Message Digest API
915!Finclude/crypto/hash.h crypto_alloc_ahash
916!Finclude/crypto/hash.h crypto_free_ahash
917!Finclude/crypto/hash.h crypto_ahash_init
918!Finclude/crypto/hash.h crypto_ahash_digestsize
919!Finclude/crypto/hash.h crypto_ahash_reqtfm
920!Finclude/crypto/hash.h crypto_ahash_reqsize
921!Finclude/crypto/hash.h crypto_ahash_setkey
922!Finclude/crypto/hash.h crypto_ahash_finup
923!Finclude/crypto/hash.h crypto_ahash_final
924!Finclude/crypto/hash.h crypto_ahash_digest
925!Finclude/crypto/hash.h crypto_ahash_export
926!Finclude/crypto/hash.h crypto_ahash_import
927 </sect1>
928 <sect1><title>Asynchronous Hash Request Handle</title>
929!Pinclude/crypto/hash.h Asynchronous Hash Request Handle
930!Finclude/crypto/hash.h ahash_request_set_tfm
931!Finclude/crypto/hash.h ahash_request_alloc
932!Finclude/crypto/hash.h ahash_request_free
933!Finclude/crypto/hash.h ahash_request_set_callback
934!Finclude/crypto/hash.h ahash_request_set_crypt
935 </sect1>
936 <sect1><title>Synchronous Message Digest API</title>
937!Pinclude/crypto/hash.h Synchronous Message Digest API
938!Finclude/crypto/hash.h crypto_alloc_shash
939!Finclude/crypto/hash.h crypto_free_shash
940!Finclude/crypto/hash.h crypto_shash_blocksize
941!Finclude/crypto/hash.h crypto_shash_digestsize
942!Finclude/crypto/hash.h crypto_shash_descsize
943!Finclude/crypto/hash.h crypto_shash_setkey
944!Finclude/crypto/hash.h crypto_shash_digest
945!Finclude/crypto/hash.h crypto_shash_export
946!Finclude/crypto/hash.h crypto_shash_import
947!Finclude/crypto/hash.h crypto_shash_init
948!Finclude/crypto/hash.h crypto_shash_update
949!Finclude/crypto/hash.h crypto_shash_final
950!Finclude/crypto/hash.h crypto_shash_finup
951 </sect1>
952 <sect1><title>Crypto API Random Number API</title>
953!Pinclude/crypto/rng.h Random number generator API
954!Finclude/crypto/rng.h crypto_alloc_rng
955!Finclude/crypto/rng.h crypto_rng_alg
956!Finclude/crypto/rng.h crypto_free_rng
957!Finclude/crypto/rng.h crypto_rng_get_bytes
958!Finclude/crypto/rng.h crypto_rng_reset
959!Finclude/crypto/rng.h crypto_rng_seedsize
960!Cinclude/crypto/rng.h
961 </sect1>
962 </chapter>
963
964 <chapter id="Code"><title>Code Examples</title>
965 <sect1><title>Code Example For Asynchronous Block Cipher Operation</title>
966 <programlisting>
967
968struct tcrypt_result {
969 struct completion completion;
970 int err;
971};
972
973/* tie all data structures together */
974struct ablkcipher_def {
975 struct scatterlist sg;
976 struct crypto_ablkcipher *tfm;
977 struct ablkcipher_request *req;
978 struct tcrypt_result result;
979};
980
981/* Callback function */
982static void test_ablkcipher_cb(struct crypto_async_request *req, int error)
983{
984 struct tcrypt_result *result = req-&gt;data;
985
986 if (error == -EINPROGRESS)
987 return;
988 result-&gt;err = error;
989 complete(&amp;result-&gt;completion);
990 pr_info("Encryption finished successfully\n");
991}
992
993/* Perform cipher operation */
994static unsigned int test_ablkcipher_encdec(struct ablkcipher_def *ablk,
995 int enc)
996{
997 int rc = 0;
998
999 if (enc)
1000 rc = crypto_ablkcipher_encrypt(ablk-&gt;req);
1001 else
1002 rc = crypto_ablkcipher_decrypt(ablk-&gt;req);
1003
1004 switch (rc) {
1005 case 0:
1006 break;
1007 case -EINPROGRESS:
1008 case -EBUSY:
1009 rc = wait_for_completion_interruptible(
1010 &amp;ablk-&gt;result.completion);
1011 if (!rc &amp;&amp; !ablk-&gt;result.err) {
1012 reinit_completion(&amp;ablk-&gt;result.completion);
1013 break;
1014 }
1015 default:
1016 pr_info("ablkcipher encrypt returned with %d result %d\n",
1017 rc, ablk-&gt;result.err);
1018 break;
1019 }
1020 init_completion(&amp;ablk-&gt;result.completion);
1021
1022 return rc;
1023}
1024
1025/* Initialize and trigger cipher operation */
1026static int test_ablkcipher(void)
1027{
1028 struct ablkcipher_def ablk;
1029 struct crypto_ablkcipher *ablkcipher = NULL;
1030 struct ablkcipher_request *req = NULL;
1031 char *scratchpad = NULL;
1032 char *ivdata = NULL;
1033 unsigned char key[32];
1034 int ret = -EFAULT;
1035
1036 ablkcipher = crypto_alloc_ablkcipher("cbc-aes-aesni", 0, 0);
1037 if (IS_ERR(ablkcipher)) {
1038 pr_info("could not allocate ablkcipher handle\n");
1039 return PTR_ERR(ablkcipher);
1040 }
1041
1042 req = ablkcipher_request_alloc(ablkcipher, GFP_KERNEL);
1043 if (!req) {
1044 pr_info("could not allocate request queue\n");
1045 ret = -ENOMEM;
1046 goto out;
1047 }
1048
1049 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1050 test_ablkcipher_cb,
1051 &amp;ablk.result);
1052
1053 /* AES 256 with random key */
1054 get_random_bytes(&amp;key, 32);
1055 if (crypto_ablkcipher_setkey(ablkcipher, key, 32)) {
1056 pr_info("key could not be set\n");
1057 ret = -EAGAIN;
1058 goto out;
1059 }
1060
1061 /* IV will be random */
1062 ivdata = kmalloc(16, GFP_KERNEL);
1063 if (!ivdata) {
1064 pr_info("could not allocate ivdata\n");
1065 goto out;
1066 }
1067 get_random_bytes(ivdata, 16);
1068
1069 /* Input data will be random */
1070 scratchpad = kmalloc(16, GFP_KERNEL);
1071 if (!scratchpad) {
1072 pr_info("could not allocate scratchpad\n");
1073 goto out;
1074 }
1075 get_random_bytes(scratchpad, 16);
1076
1077 ablk.tfm = ablkcipher;
1078 ablk.req = req;
1079
1080 /* We encrypt one block */
1081 sg_init_one(&amp;ablk.sg, scratchpad, 16);
1082 ablkcipher_request_set_crypt(req, &amp;ablk.sg, &amp;ablk.sg, 16, ivdata);
1083 init_completion(&amp;ablk.result.completion);
1084
1085 /* encrypt data */
1086 ret = test_ablkcipher_encdec(&amp;ablk, 1);
1087 if (ret)
1088 goto out;
1089
1090 pr_info("Encryption triggered successfully\n");
1091
1092out:
1093 if (ablkcipher)
1094 crypto_free_ablkcipher(ablkcipher);
1095 if (req)
1096 ablkcipher_request_free(req);
1097 if (ivdata)
1098 kfree(ivdata);
1099 if (scratchpad)
1100 kfree(scratchpad);
1101 return ret;
1102}
1103 </programlisting>
1104 </sect1>
1105
1106 <sect1><title>Code Example For Synchronous Block Cipher Operation</title>
1107 <programlisting>
1108
1109static int test_blkcipher(void)
1110{
1111 struct crypto_blkcipher *blkcipher = NULL;
1112 char *cipher = "cbc(aes)";
1113 // AES 128
1114 char *key =
1115"\x12\x34\x56\x78\x90\xab\xcd\xef\x12\x34\x56\x78\x90\xab\xcd\xef";
1116 char *iv =
1117"\x12\x34\x56\x78\x90\xab\xcd\xef\x12\x34\x56\x78\x90\xab\xcd\xef";
1118 unsigned int ivsize = 0;
1119 char *scratchpad = NULL; // holds plaintext and ciphertext
1120 struct scatterlist sg;
1121 struct blkcipher_desc desc;
1122 int ret = -EFAULT;
1123
1124 blkcipher = crypto_alloc_blkcipher(cipher, 0, 0);
1125 if (IS_ERR(blkcipher)) {
1126 printk("could not allocate blkcipher handle for %s\n", cipher);
1127 return PTR_ERR(blkcipher);
1128 }
1129
1130 if (crypto_blkcipher_setkey(blkcipher, key, strlen(key))) {
1131 printk("key could not be set\n");
1132 ret = -EAGAIN;
1133 goto out;
1134 }
1135
1136 ivsize = crypto_blkcipher_ivsize(blkcipher);
1137 if (ivsize) {
1138 if (ivsize != strlen(iv))
1139 printk("IV length differs from expected length\n");
1140 crypto_blkcipher_set_iv(blkcipher, iv, ivsize);
1141 }
1142
1143 scratchpad = kmalloc(crypto_blkcipher_blocksize(blkcipher), GFP_KERNEL);
1144 if (!scratchpad) {
1145 printk("could not allocate scratchpad for %s\n", cipher);
1146 goto out;
1147 }
1148 /* get some random data that we want to encrypt */
1149 get_random_bytes(scratchpad, crypto_blkcipher_blocksize(blkcipher));
1150
1151 desc.flags = 0;
1152 desc.tfm = blkcipher;
1153 sg_init_one(&amp;sg, scratchpad, crypto_blkcipher_blocksize(blkcipher));
1154
1155 /* encrypt data in place */
1156 crypto_blkcipher_encrypt(&amp;desc, &amp;sg, &amp;sg,
1157 crypto_blkcipher_blocksize(blkcipher));
1158
1159 /* to decrypt the data in place, call
1160 * crypto_blkcipher_decrypt(&amp;desc, &amp;sg, &amp;sg,
1161 * crypto_blkcipher_blocksize(blkcipher)); */
1162
1163
1164 printk("Cipher operation completed\n");
1165 return 0;
1166
1167out:
1168 if (blkcipher)
1169 crypto_free_blkcipher(blkcipher);
1170 if (scratchpad)
1171 kzfree(scratchpad);
1172 return ret;
1173}
1174 </programlisting>
1175 </sect1>
1176
1177 <sect1><title>Code Example For Use of Operational State Memory With SHASH</title>
1178 <programlisting>
1179
1180struct sdesc {
1181 struct shash_desc shash;
1182 char ctx[];
1183};
1184
1185static struct sdesc *init_sdesc(struct crypto_shash *alg)
1186{
1187 struct sdesc *sdesc;
1188 int size;
1189
1190 size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
1191 sdesc = kmalloc(size, GFP_KERNEL);
1192 if (!sdesc)
1193 return ERR_PTR(-ENOMEM);
1194 sdesc-&gt;shash.tfm = alg;
1195 sdesc-&gt;shash.flags = 0x0;
1196 return sdesc;
1197}
1198
1199static int calc_hash(struct crypto_shash *alg,
1200 const unsigned char *data, unsigned int datalen,
1201 unsigned char *digest) {
1202 struct sdesc *sdesc;
1203 int ret;
1204
1205 sdesc = init_sdesc(alg);
1206 if (IS_ERR(sdesc)) {
1207 pr_info("trusted_key: can't alloc %s\n", hash_alg);
1208 return PTR_ERR(sdesc);
1209 }
1210
1211 ret = crypto_shash_digest(&amp;sdesc-&gt;shash, data, datalen, digest);
1212 kfree(sdesc);
1213 return ret;
1214}
1215 </programlisting>
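
    <para>
      A possible use of the calc_hash() helper above, as an
      illustrative sketch assuming the "sha256" message digest is
      available (the digest buffer must hold 32 bytes for SHA-256):
    </para>

    <programlisting>
 static int test_hash(const unsigned char *data, unsigned int datalen,
                      unsigned char *digest)
 {
         struct crypto_shash *alg;
         int ret;

         alg = crypto_alloc_shash("sha256", 0, 0);
         if (IS_ERR(alg))
                 return PTR_ERR(alg);
         ret = calc_hash(alg, data, datalen, digest);
         crypto_free_shash(alg);
         return ret;
 }
    </programlisting>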
1216 </sect1>
1217
1218 <sect1><title>Code Example For Random Number Generator Usage</title>
1219 <programlisting>
1220
1221static int get_random_numbers(u8 *buf, unsigned int len)
1222{
1223 struct crypto_rng *rng = NULL;
1224 char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
1225 int ret;
1226
1227 if (!buf || !len) {
1228 pr_debug("No output buffer provided\n");
1229 return -EINVAL;
1230 }
1231
1232 rng = crypto_alloc_rng(drbg, 0, 0);
1233 if (IS_ERR(rng)) {
1234 pr_debug("could not allocate RNG handle for %s\n", drbg);
1235 return PTR_ERR(rng);
1236 }
1237
1238 ret = crypto_rng_get_bytes(rng, buf, len);
1239 if (ret &lt; 0)
1240 pr_debug("generation of random numbers failed\n");
1241 else if (ret == 0)
1242 pr_debug("RNG returned no data");
1243 else
1244 pr_debug("RNG returned %d bytes of data\n", ret);
1245
1246
1247 crypto_free_rng(rng);
1248 return ret;
1249}
1250 </programlisting>
1251 </sect1>
1252 </chapter>
1253 </book>
diff --git a/Documentation/crypto/crypto-API-userspace.txt b/Documentation/crypto/crypto-API-userspace.txt
new file mode 100644
index 000000000000..ac619cd90300
--- /dev/null
+++ b/Documentation/crypto/crypto-API-userspace.txt
@@ -0,0 +1,205 @@
1Introduction
2============
3
4The concepts of the kernel crypto API visible to kernel space are fully
5applicable to the user space interface as well. Therefore, the high-level
6discussion of the kernel crypto API for the in-kernel use cases applies here as well.
7
8The major difference, however, is that user space can only act as a consumer
9and never as a provider of a transformation or cipher algorithm.
10
11The following covers the user space interface exported by the kernel crypto
12API. A working example of this description is libkcapi that can be obtained from
13[1]. That library can be used by user space applications that require
14cryptographic services from the kernel.
15
16Some aspects of the in-kernel crypto API do not
17apply to user space, however. This includes the difference between synchronous
18and asynchronous invocations. The user space API call is fully synchronous.
19In addition, only a subset of all cipher types are available as documented
20below.
21
22
23User space API general remarks
24==============================
25
26The kernel crypto API is accessible from user space. Currently, the following
27ciphers are accessible:
28
29 * Message digest including keyed message digest (HMAC, CMAC)
30
31 * Symmetric ciphers
32
33Note, AEAD ciphers are currently not supported via the symmetric cipher
34interface.
35
36The interface is provided via a dedicated socket family, AF_ALG. In addition, the
37setsockopt level to be used is SOL_ALG. In case the user space header files do not
38export these constants yet, use the following macros:
39
40#ifndef AF_ALG
41#define AF_ALG 38
42#endif
43#ifndef SOL_ALG
44#define SOL_ALG 279
45#endif
46
47A cipher is accessed with the same name as done for the in-kernel API calls.
48This includes the generic vs. unique naming schema for ciphers as well as the
49enforcement of priorities for generic names.
50
51To interact with the kernel crypto API, an AF_ALG socket must be created by
52the user space application. User space invokes the cipher operation with the
53send/write system call family. The result of the cipher operation is obtained
54with the read/recv system call family.
55
56The following API calls assume that the AF_ALG socket descriptor is already
57opened by the user space application and discusses only the kernel crypto API
58specific invocations.
59
60To initialize such an AF_ALG socket, the following sequence has to be performed
61by the consumer (a minimal code sketch follows the list):
62
63 1. Create a socket of type AF_ALG with the struct sockaddr_alg parameter
64 specified below for the different cipher types.
65
66 2. Invoke bind with the socket descriptor
67
68 3. Invoke accept with the socket descriptor. The accept system call
69 returns a new file descriptor that is to be used to interact with
70 the particular cipher instance. When invoking send/write or recv/read
71 system calls to send data to the kernel or obtain data from the
72 kernel, the file descriptor returned by accept must be used.
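
A minimal sketch of this sequence for a message digest socket (illustrative
user space code; error handling shortened, struct sockaddr_alg comes from
linux/if_alg.h):

#include <sys/socket.h>
#include <linux/if_alg.h>

int get_hash_fd(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	int tfmfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0)
		return -1;
	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return -1;
	/* the descriptor returned by accept is used for send/recv */
	return accept(tfmfd, NULL, 0);
}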
73
74In-place cipher operation
75=========================
76
77Just like the in-kernel operation of the kernel crypto API, the user space
78interface allows the cipher operation in-place. That means that the input buffer
79used for the send/write system call and the output buffer used by the read/recv
80system call may be one and the same. This is of particular interest for
81symmetric cipher operations where a copying of the output data to its final
82destination can be avoided.
83
84If a consumer on the other hand wants to maintain the plaintext and the
85ciphertext in different memory locations, all a consumer needs to do is to
86provide different memory pointers for the encryption and decryption operation.
87
88Message digest API
89==================
90
91The message digest type to be used for the cipher operation is selected when
92invoking the bind syscall. bind requires the caller to provide a filled
93struct sockaddr data structure. This data structure must be filled as follows:
94
95struct sockaddr_alg sa = {
96 .salg_family = AF_ALG,
97 .salg_type = "hash", /* this selects the hash logic in the kernel */
98 .salg_name = "sha1" /* this is the cipher name */
99};
100
101The salg_type value "hash" applies to message digests and keyed message digests.
102A keyed message digest, however, is referenced by the appropriate salg_name.
103Please see below for the setsockopt interface that explains how the key can be
104set for a keyed message digest.

Using the send() system call, the application provides the data that should be
processed with the message digest. The send system call allows the following
flags to be specified:

 * MSG_MORE: If this flag is set, the send system call acts like a
             message digest update function where the final hash is not
             yet calculated. If the flag is not set, the send system call
             calculates the final message digest immediately.

With the recv() system call, the application can read the message digest from
the kernel crypto API. If the buffer is too small for the message digest, the
flag MSG_TRUNC is set by the kernel.
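
A minimal sketch, assuming opfd refers to an accepted "hash"/"sha1" socket as
set up above: two chunks are fed to the kernel, the first one with MSG_MORE,
and the 20 byte SHA-1 digest is read back afterwards. The helper name is only
chosen for this example.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

static int sha1_example(int opfd, unsigned char digest[20])
{
    static const char part1[] = "hello ";
    static const char part2[] = "world";

    /* update step: the final hash is not yet calculated */
    if (send(opfd, part1, strlen(part1), MSG_MORE) < 0)
        return -1;

    /* final step: the message digest is calculated now */
    if (send(opfd, part2, strlen(part2), 0) < 0)
        return -1;

    /* a smaller buffer would be truncated and MSG_TRUNC set */
    if (read(opfd, digest, 20) != 20)
        return -1;

    return 0;
}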

In order to set a message digest key, the calling application must use the
setsockopt() option ALG_SET_KEY. If the key is not set, the HMAC operation is
performed without the initial HMAC state change caused by the key.
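
A minimal sketch of setting such a key, assuming the tfm socket was bound with
salg_type "hash" and salg_name "hmac(sha256)". Note that the key is installed
on the bound tfm socket before accept() is called (see also the setsockopt
section below); the helper name is only chosen for this example.

#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

static int set_hmac_key(int tfmfd, const unsigned char *key,
                        unsigned int keylen)
{
    return setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen);
}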


Symmetric cipher API
====================

The operation is very similar to the message digest discussion. During
initialization, the struct sockaddr data structure must be filled as follows:

struct sockaddr_alg sa = {
    .salg_family = AF_ALG,
    .salg_type = "skcipher", /* this selects the symmetric cipher */
    .salg_name = "cbc(aes)" /* this is the cipher name */
};

Before data can be sent to the kernel using the write/send system call family,
the consumer must set the key. The key setting is described with the setsockopt
invocation below.

Using the sendmsg() system call, the application provides the data that should
be processed for encryption or decryption. In addition, the IV is specified
with the data structure provided by the sendmsg() system call.

The cipher operation is configured with ancillary data: one or more struct
cmsghdr instances are embedded into the msg_control buffer of the struct
msghdr parameter handed to the sendmsg system call. See recv(2) and cmsg(3)
for more information on how the cmsghdr data structure is used together with
the send/recv system call family. The ancillary data holds the following
information, each specified with a separate header instance (a sketch of how
to assemble it is shown after the list):

 * specification of the cipher operation type with one of these flags:
   ALG_OP_ENCRYPT - encryption of data
   ALG_OP_DECRYPT - decryption of data

 * specification of the IV information marked with the flag ALG_SET_IV

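A minimal sketch of assembling this ancillary data for one encryption request
on a "skcipher"/"cbc(aes)" descriptor opfd. It assumes that the operation type
travels under the cmsg type ALG_SET_OP with an ALG_OP_* value as payload, that
struct af_alg_iv and the ALG_* constants come from linux/if_alg.h, and that
the 16 byte IV length matches cbc(aes); the helper name is only chosen for
this example.

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

#define IVLEN 16 /* block size of cbc(aes) */

static int send_encrypt_request(int opfd, const unsigned char iv[IVLEN],
                                const void *in, size_t inlen)
{
    struct msghdr msg;
    struct cmsghdr *cmsg;
    struct af_alg_iv *alg_iv;
    struct iovec iov;
    union {
        char buf[CMSG_SPACE(sizeof(__u32)) +
                 CMSG_SPACE(offsetof(struct af_alg_iv, iv) + IVLEN)];
        struct cmsghdr align;
    } control;

    memset(&msg, 0, sizeof(msg));
    memset(&control, 0, sizeof(control));

    msg.msg_control = control.buf;
    msg.msg_controllen = sizeof(control.buf);

    /* first header: the cipher operation type */
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_ALG;
    cmsg->cmsg_type = ALG_SET_OP;
    cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
    *(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

    /* second header: the IV, carried in a struct af_alg_iv */
    cmsg = CMSG_NXTHDR(&msg, cmsg);
    cmsg->cmsg_level = SOL_ALG;
    cmsg->cmsg_type = ALG_SET_IV;
    cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + IVLEN);
    alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
    alg_iv->ivlen = IVLEN;
    memcpy(alg_iv->iv, iv, IVLEN);

    /* the plaintext itself is passed as ordinary data */
    iov.iov_base = (void *)in;
    iov.iov_len = inlen;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    return sendmsg(opfd, &msg, 0) < 0 ? -1 : 0;
}

The result of the request is subsequently obtained with read()/recv() as
described below.
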
The send system call family allows the following flag to be specified:

 * MSG_MORE: If this flag is set, the send system call acts like a
             cipher update function where more input data is expected
             with a subsequent invocation of the send system call.

Note: The kernel reports -EINVAL for any unexpected data. The caller must
make sure that all data matches the constraints given in /proc/crypto for the
selected cipher.

With the recv() system call, the application can read the result of the
cipher operation from the kernel crypto API. The output buffer must be large
enough to hold all blocks of the encrypted or decrypted data. If the output
buffer is smaller, only as many blocks as fit into that buffer are returned.
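
Continuing the sketch from the symmetric cipher section above, the ciphertext
of the cbc(aes) request can be read back into a buffer of the same (block
aligned) size as the plaintext that was sent; the helper name is only chosen
for this example.

#include <unistd.h>

static int read_cipher_result(int opfd, unsigned char *out, size_t outlen)
{
    /* a smaller buffer would only return as many blocks as fit */
    return read(opfd, out, outlen) == (ssize_t)outlen ? 0 : -1;
}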

Setsockopt interface
====================

In addition to the read/recv and send/write system calls used to send and
retrieve the data subject to the cipher operation, a consumer also needs to
set additional information for the cipher operation. This additional
information is set using the setsockopt system call. Note that the key
(ALG_SET_KEY) is set on the socket descriptor obtained from the socket()/bind()
sequence (the tfm socket), not on the file descriptor returned by the accept
system call.

Each setsockopt invocation must use the level SOL_ALG.

The setsockopt interface allows setting the following data using the given
optname (a usage sketch is shown after the list):

 * ALG_SET_KEY -- Setting the key. Key setting is applicable to:

   - the skcipher cipher type (symmetric ciphers)

   - the hash cipher type (keyed message digests)

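A minimal usage sketch for ALG_SET_KEY, here installing a 128 bit AES key on a
"skcipher"/"cbc(aes)" tfm socket; this mirrors the HMAC key example above, and
the helper name and the origin of the key bytes are only chosen for this
example.

#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

static int set_aes_key(int tfmfd, const unsigned char key[16])
{
    /* invoked on the bound tfm socket, before accept() */
    return setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
}
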
User space API example
======================

Please see [1] for libkcapi which provides an easy-to-use wrapper around the
aforementioned AF_ALG kernel interface. [1] also contains a test application
that invokes all libkcapi API calls.

[1] http://www.chronox.de/libkcapi.html

Author
======

Stephan Mueller <smueller@chronox.de>
diff --git a/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt b/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
index 5c65eccd0e56..e8a35c71e947 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
@@ -1,5 +1,5 @@
1Freescale SAHARA Cryptographic Accelerator included in some i.MX chips. 1Freescale SAHARA Cryptographic Accelerator included in some i.MX chips.
2Currently only i.MX27 is supported. 2Currently only i.MX27 and i.MX53 are supported.
3 3
4Required properties: 4Required properties:
5- compatible : Should be "fsl,<soc>-sahara" 5- compatible : Should be "fsl,<soc>-sahara"
diff --git a/Documentation/devicetree/bindings/hwrng/atmel-trng.txt b/Documentation/devicetree/bindings/hwrng/atmel-trng.txt
new file mode 100644
index 000000000000..4ac5aaa2d024
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwrng/atmel-trng.txt
@@ -0,0 +1,16 @@
1Atmel TRNG (True Random Number Generator) block
2
3Required properties:
4- compatible : Should be "atmel,at91sam9g45-trng"
5- reg : Offset and length of the register set of this block
6- interrupts : the interrupt number for the TRNG block
7- clocks: should contain the TRNG clk source
8
9Example:
10
11trng@fffcc000 {
12 compatible = "atmel,at91sam9g45-trng";
13 reg = <0xfffcc000 0x4000>;
14 interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
15 clocks = <&trng_clk>;
16};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index d3f65130a1f8..6c0637a4bda5 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -940,6 +940,13 @@
940 status = "disabled"; 940 status = "disabled";
941 }; 941 };
942 942
943 trng@fffcc000 {
944 compatible = "atmel,at91sam9g45-trng";
945 reg = <0xfffcc000 0x4000>;
946 interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
947 clocks = <&trng_clk>;
948 };
949
943 i2c0: i2c@fff84000 { 950 i2c0: i2c@fff84000 {
944 compatible = "atmel,at91sam9g10-i2c"; 951 compatible = "atmel,at91sam9g10-i2c";
945 reg = <0xfff84000 0x100>; 952 reg = <0xfff84000 0x100>;
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
index 3003fa1f6fb4..0409b8f89782 100644
--- a/arch/arm/crypto/aes_glue.c
+++ b/arch/arm/crypto/aes_glue.c
@@ -93,6 +93,6 @@ module_exit(aes_fini);
93 93
94MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)"); 94MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
95MODULE_LICENSE("GPL"); 95MODULE_LICENSE("GPL");
96MODULE_ALIAS("aes"); 96MODULE_ALIAS_CRYPTO("aes");
97MODULE_ALIAS("aes-asm"); 97MODULE_ALIAS_CRYPTO("aes-asm");
98MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>"); 98MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 84f2a756588b..e31b0440c613 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -171,5 +171,5 @@ module_exit(sha1_mod_fini);
171 171
172MODULE_LICENSE("GPL"); 172MODULE_LICENSE("GPL");
173MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)"); 173MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
174MODULE_ALIAS("sha1"); 174MODULE_ALIAS_CRYPTO("sha1");
175MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>"); 175MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 6f1b411b1d55..0b0083757d47 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -194,4 +194,4 @@ module_exit(sha1_neon_mod_fini);
194 194
195MODULE_LICENSE("GPL"); 195MODULE_LICENSE("GPL");
196MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated"); 196MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
197MODULE_ALIAS("sha1"); 197MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
index 0d2758ff5e12..b124dce838d6 100644
--- a/arch/arm/crypto/sha512_neon_glue.c
+++ b/arch/arm/crypto/sha512_neon_glue.c
@@ -241,7 +241,7 @@ static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
241 sha512_neon_final(desc, D); 241 sha512_neon_final(desc, D);
242 242
243 memcpy(hash, D, SHA384_DIGEST_SIZE); 243 memcpy(hash, D, SHA384_DIGEST_SIZE);
244 memset(D, 0, SHA512_DIGEST_SIZE); 244 memzero_explicit(D, SHA512_DIGEST_SIZE);
245 245
246 return 0; 246 return 0;
247} 247}
@@ -301,5 +301,5 @@ module_exit(sha512_neon_mod_fini);
301MODULE_LICENSE("GPL"); 301MODULE_LICENSE("GPL");
302MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated"); 302MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
303 303
304MODULE_ALIAS("sha512"); 304MODULE_ALIAS_CRYPTO("sha512");
305MODULE_ALIAS("sha384"); 305MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index a38b02ce5f9a..2cf32e9887e1 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -49,4 +49,8 @@ config CRYPTO_AES_ARM64_NEON_BLK
49 select CRYPTO_AES 49 select CRYPTO_AES
50 select CRYPTO_ABLK_HELPER 50 select CRYPTO_ABLK_HELPER
51 51
52config CRYPTO_CRC32_ARM64
53 tristate "CRC32 and CRC32C using optional ARMv8 instructions"
54 depends on ARM64
55 select CRYPTO_HASH
52endif 56endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index a3f935fde975..5720608c50b1 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -34,5 +34,9 @@ AFLAGS_aes-neon.o := -DINTERLEAVE=4
34 34
35CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS 35CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
36 36
37obj-$(CONFIG_CRYPTO_CRC32_ARM64) += crc32-arm64.o
38
39CFLAGS_crc32-arm64.o := -mcpu=generic+crc
40
37$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE 41$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
38 $(call if_changed_rule,cc_o_c) 42 $(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 0ac73b838fa3..6c348df5bf36 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -296,4 +296,4 @@ module_exit(aes_mod_exit);
296MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions"); 296MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
297MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 297MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
298MODULE_LICENSE("GPL v2"); 298MODULE_LICENSE("GPL v2");
299MODULE_ALIAS("ccm(aes)"); 299MODULE_ALIAS_CRYPTO("ccm(aes)");
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 801aae32841f..b1b5b893eb20 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -44,10 +44,10 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
44#define aes_xts_encrypt neon_aes_xts_encrypt 44#define aes_xts_encrypt neon_aes_xts_encrypt
45#define aes_xts_decrypt neon_aes_xts_decrypt 45#define aes_xts_decrypt neon_aes_xts_decrypt
46MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON"); 46MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
47MODULE_ALIAS("ecb(aes)"); 47MODULE_ALIAS_CRYPTO("ecb(aes)");
48MODULE_ALIAS("cbc(aes)"); 48MODULE_ALIAS_CRYPTO("cbc(aes)");
49MODULE_ALIAS("ctr(aes)"); 49MODULE_ALIAS_CRYPTO("ctr(aes)");
50MODULE_ALIAS("xts(aes)"); 50MODULE_ALIAS_CRYPTO("xts(aes)");
51#endif 51#endif
52 52
53MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 53MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
new file mode 100644
index 000000000000..9499199924ae
--- /dev/null
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -0,0 +1,274 @@
1/*
2 * crc32-arm64.c - CRC32 and CRC32C using optional ARMv8 instructions
3 *
4 * Module based on crypto/crc32c_generic.c
5 *
6 * CRC32 loop taken from Ed Nevill's Hadoop CRC patch
7 * http://mail-archives.apache.org/mod_mbox/hadoop-common-dev/201406.mbox/%3C1403687030.3355.19.camel%40localhost.localdomain%3E
8 *
9 * Using inline assembly instead of intrinsics in order to be backwards
10 * compatible with older compilers.
11 *
12 * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/unaligned/access_ok.h>
20#include <linux/cpufeature.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/string.h>
25
26#include <crypto/internal/hash.h>
27
28MODULE_AUTHOR("Yazen Ghannam <yazen.ghannam@linaro.org>");
29MODULE_DESCRIPTION("CRC32 and CRC32C using optional ARMv8 instructions");
30MODULE_LICENSE("GPL v2");
31
32#define CRC32X(crc, value) __asm__("crc32x %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
33#define CRC32W(crc, value) __asm__("crc32w %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
34#define CRC32H(crc, value) __asm__("crc32h %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
35#define CRC32B(crc, value) __asm__("crc32b %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
36#define CRC32CX(crc, value) __asm__("crc32cx %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
37#define CRC32CW(crc, value) __asm__("crc32cw %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
38#define CRC32CH(crc, value) __asm__("crc32ch %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
39#define CRC32CB(crc, value) __asm__("crc32cb %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
40
41static u32 crc32_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
42{
43 s64 length = len;
44
45 while ((length -= sizeof(u64)) >= 0) {
46 CRC32X(crc, get_unaligned_le64(p));
47 p += sizeof(u64);
48 }
49
50 /* The following is more efficient than the straight loop */
51 if (length & sizeof(u32)) {
52 CRC32W(crc, get_unaligned_le32(p));
53 p += sizeof(u32);
54 }
55 if (length & sizeof(u16)) {
56 CRC32H(crc, get_unaligned_le16(p));
57 p += sizeof(u16);
58 }
59 if (length & sizeof(u8))
60 CRC32B(crc, *p);
61
62 return crc;
63}
64
65static u32 crc32c_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
66{
67 s64 length = len;
68
69 while ((length -= sizeof(u64)) >= 0) {
70 CRC32CX(crc, get_unaligned_le64(p));
71 p += sizeof(u64);
72 }
73
74 /* The following is more efficient than the straight loop */
75 if (length & sizeof(u32)) {
76 CRC32CW(crc, get_unaligned_le32(p));
77 p += sizeof(u32);
78 }
79 if (length & sizeof(u16)) {
80 CRC32CH(crc, get_unaligned_le16(p));
81 p += sizeof(u16);
82 }
83 if (length & sizeof(u8))
84 CRC32CB(crc, *p);
85
86 return crc;
87}
88
89#define CHKSUM_BLOCK_SIZE 1
90#define CHKSUM_DIGEST_SIZE 4
91
92struct chksum_ctx {
93 u32 key;
94};
95
96struct chksum_desc_ctx {
97 u32 crc;
98};
99
100static int chksum_init(struct shash_desc *desc)
101{
102 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
103 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
104
105 ctx->crc = mctx->key;
106
107 return 0;
108}
109
110/*
111 * Setting the seed allows arbitrary accumulators and flexible XOR policy
112 * If your algorithm starts with ~0, then XOR with ~0 before you set
113 * the seed.
114 */
115static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
116 unsigned int keylen)
117{
118 struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
119
120 if (keylen != sizeof(mctx->key)) {
121 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
122 return -EINVAL;
123 }
124 mctx->key = get_unaligned_le32(key);
125 return 0;
126}
127
128static int chksum_update(struct shash_desc *desc, const u8 *data,
129 unsigned int length)
130{
131 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
132
133 ctx->crc = crc32_arm64_le_hw(ctx->crc, data, length);
134 return 0;
135}
136
137static int chksumc_update(struct shash_desc *desc, const u8 *data,
138 unsigned int length)
139{
140 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
141
142 ctx->crc = crc32c_arm64_le_hw(ctx->crc, data, length);
143 return 0;
144}
145
146static int chksum_final(struct shash_desc *desc, u8 *out)
147{
148 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
149
150 put_unaligned_le32(~ctx->crc, out);
151 return 0;
152}
153
154static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
155{
156 put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
157 return 0;
158}
159
160static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
161{
162 put_unaligned_le32(~crc32c_arm64_le_hw(crc, data, len), out);
163 return 0;
164}
165
166static int chksum_finup(struct shash_desc *desc, const u8 *data,
167 unsigned int len, u8 *out)
168{
169 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
170
171 return __chksum_finup(ctx->crc, data, len, out);
172}
173
174static int chksumc_finup(struct shash_desc *desc, const u8 *data,
175 unsigned int len, u8 *out)
176{
177 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
178
179 return __chksumc_finup(ctx->crc, data, len, out);
180}
181
182static int chksum_digest(struct shash_desc *desc, const u8 *data,
183 unsigned int length, u8 *out)
184{
185 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
186
187 return __chksum_finup(mctx->key, data, length, out);
188}
189
190static int chksumc_digest(struct shash_desc *desc, const u8 *data,
191 unsigned int length, u8 *out)
192{
193 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
194
195 return __chksumc_finup(mctx->key, data, length, out);
196}
197
198static int crc32_cra_init(struct crypto_tfm *tfm)
199{
200 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
201
202 mctx->key = ~0;
203 return 0;
204}
205
206static struct shash_alg crc32_alg = {
207 .digestsize = CHKSUM_DIGEST_SIZE,
208 .setkey = chksum_setkey,
209 .init = chksum_init,
210 .update = chksum_update,
211 .final = chksum_final,
212 .finup = chksum_finup,
213 .digest = chksum_digest,
214 .descsize = sizeof(struct chksum_desc_ctx),
215 .base = {
216 .cra_name = "crc32",
217 .cra_driver_name = "crc32-arm64-hw",
218 .cra_priority = 300,
219 .cra_blocksize = CHKSUM_BLOCK_SIZE,
220 .cra_alignmask = 0,
221 .cra_ctxsize = sizeof(struct chksum_ctx),
222 .cra_module = THIS_MODULE,
223 .cra_init = crc32_cra_init,
224 }
225};
226
227static struct shash_alg crc32c_alg = {
228 .digestsize = CHKSUM_DIGEST_SIZE,
229 .setkey = chksum_setkey,
230 .init = chksum_init,
231 .update = chksumc_update,
232 .final = chksum_final,
233 .finup = chksumc_finup,
234 .digest = chksumc_digest,
235 .descsize = sizeof(struct chksum_desc_ctx),
236 .base = {
237 .cra_name = "crc32c",
238 .cra_driver_name = "crc32c-arm64-hw",
239 .cra_priority = 300,
240 .cra_blocksize = CHKSUM_BLOCK_SIZE,
241 .cra_alignmask = 0,
242 .cra_ctxsize = sizeof(struct chksum_ctx),
243 .cra_module = THIS_MODULE,
244 .cra_init = crc32_cra_init,
245 }
246};
247
248static int __init crc32_mod_init(void)
249{
250 int err;
251
252 err = crypto_register_shash(&crc32_alg);
253
254 if (err)
255 return err;
256
257 err = crypto_register_shash(&crc32c_alg);
258
259 if (err) {
260 crypto_unregister_shash(&crc32_alg);
261 return err;
262 }
263
264 return 0;
265}
266
267static void __exit crc32_mod_exit(void)
268{
269 crypto_unregister_shash(&crc32_alg);
270 crypto_unregister_shash(&crc32c_alg);
271}
272
273module_cpu_feature_match(CRC32, crc32_mod_init);
274module_exit(crc32_mod_exit);
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index f9e8b9491efc..d3feba5a275f 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -66,7 +66,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
66 src = data + done; 66 src = data + done;
67 } while (done + 63 < len); 67 } while (done + 63 < len);
68 68
69 memset(temp, 0, sizeof(temp)); 69 memzero_explicit(temp, sizeof(temp));
70 partial = 0; 70 partial = 0;
71 } 71 }
72 memcpy(sctx->buffer + partial, src, len - done); 72 memcpy(sctx->buffer + partial, src, len - done);
@@ -154,4 +154,4 @@ module_exit(sha1_powerpc_mod_fini);
154MODULE_LICENSE("GPL"); 154MODULE_LICENSE("GPL");
155MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); 155MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
156 156
157MODULE_ALIAS("sha1-powerpc"); 157MODULE_ALIAS_CRYPTO("sha1-powerpc");
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 23223cd63e54..1f272b24fc0b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -979,7 +979,7 @@ static void __exit aes_s390_fini(void)
979module_init(aes_s390_init); 979module_init(aes_s390_init);
980module_exit(aes_s390_fini); 980module_exit(aes_s390_fini);
981 981
982MODULE_ALIAS("aes-all"); 982MODULE_ALIAS_CRYPTO("aes-all");
983 983
984MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); 984MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
985MODULE_LICENSE("GPL"); 985MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 7acb77f7ef1a..9e05cc453a40 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void)
619module_init(des_s390_init); 619module_init(des_s390_init);
620module_exit(des_s390_exit); 620module_exit(des_s390_exit);
621 621
622MODULE_ALIAS("des"); 622MODULE_ALIAS_CRYPTO("des");
623MODULE_ALIAS("des3_ede"); 623MODULE_ALIAS_CRYPTO("des3_ede");
624 624
625MODULE_LICENSE("GPL"); 625MODULE_LICENSE("GPL");
626MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); 626MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index d43485d142e9..7940dc90e80b 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void)
160module_init(ghash_mod_init); 160module_init(ghash_mod_init);
161module_exit(ghash_mod_exit); 161module_exit(ghash_mod_exit);
162 162
163MODULE_ALIAS("ghash"); 163MODULE_ALIAS_CRYPTO("ghash");
164 164
165MODULE_LICENSE("GPL"); 165MODULE_LICENSE("GPL");
166MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation"); 166MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index a1b3a9dc9d8a..5b2bee323694 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
103module_init(sha1_s390_init); 103module_init(sha1_s390_init);
104module_exit(sha1_s390_fini); 104module_exit(sha1_s390_fini);
105 105
106MODULE_ALIAS("sha1"); 106MODULE_ALIAS_CRYPTO("sha1");
107MODULE_LICENSE("GPL"); 107MODULE_LICENSE("GPL");
108MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); 108MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 9b853809a492..b74ff158108c 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
143module_init(sha256_s390_init); 143module_init(sha256_s390_init);
144module_exit(sha256_s390_fini); 144module_exit(sha256_s390_fini);
145 145
146MODULE_ALIAS("sha256"); 146MODULE_ALIAS_CRYPTO("sha256");
147MODULE_ALIAS("sha224"); 147MODULE_ALIAS_CRYPTO("sha224");
148MODULE_LICENSE("GPL"); 148MODULE_LICENSE("GPL");
149MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm"); 149MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 32a81383b69c..0c36989ba182 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
86 } 86 }
87}; 87};
88 88
89MODULE_ALIAS("sha512"); 89MODULE_ALIAS_CRYPTO("sha512");
90 90
91static int sha384_init(struct shash_desc *desc) 91static int sha384_init(struct shash_desc *desc)
92{ 92{
@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
126 } 126 }
127}; 127};
128 128
129MODULE_ALIAS("sha384"); 129MODULE_ALIAS_CRYPTO("sha384");
130 130
131static int __init init(void) 131static int __init init(void)
132{ 132{
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index df922f52d76d..705408766ab0 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
499MODULE_LICENSE("GPL"); 499MODULE_LICENSE("GPL");
500MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); 500MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
501 501
502MODULE_ALIAS("aes"); 502MODULE_ALIAS_CRYPTO("aes");
503 503
504#include "crop_devid.c" 504#include "crop_devid.c"
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 888f6260b4ec..641f55cb61c3 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
322MODULE_LICENSE("GPL"); 322MODULE_LICENSE("GPL");
323MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); 323MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
324 324
325MODULE_ALIAS("aes"); 325MODULE_ALIAS_CRYPTO("aes");
326 326
327#include "crop_devid.c" 327#include "crop_devid.c"
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index 5162fad912ce..d1064e46efe8 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
176MODULE_LICENSE("GPL"); 176MODULE_LICENSE("GPL");
177MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); 177MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
178 178
179MODULE_ALIAS("crc32c"); 179MODULE_ALIAS_CRYPTO("crc32c");
180 180
181#include "crop_devid.c" 181#include "crop_devid.c"
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 3065bc61f9d3..d11500972994 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
532MODULE_LICENSE("GPL"); 532MODULE_LICENSE("GPL");
533MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); 533MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
534 534
535MODULE_ALIAS("des"); 535MODULE_ALIAS_CRYPTO("des");
536 536
537#include "crop_devid.c" 537#include "crop_devid.c"
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 09a9ea1dfb69..64c7ff5f72a9 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
185MODULE_LICENSE("GPL"); 185MODULE_LICENSE("GPL");
186MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); 186MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
187 187
188MODULE_ALIAS("md5"); 188MODULE_ALIAS_CRYPTO("md5");
189 189
190#include "crop_devid.c" 190#include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 6cd5f29e1e0d..1b3e47accc74 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
180MODULE_LICENSE("GPL"); 180MODULE_LICENSE("GPL");
181MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); 181MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
182 182
183MODULE_ALIAS("sha1"); 183MODULE_ALIAS_CRYPTO("sha1");
184 184
185#include "crop_devid.c" 185#include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 04f555ab2680..285268ca9279 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -135,7 +135,7 @@ static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash)
135 sha256_sparc64_final(desc, D); 135 sha256_sparc64_final(desc, D);
136 136
137 memcpy(hash, D, SHA224_DIGEST_SIZE); 137 memcpy(hash, D, SHA224_DIGEST_SIZE);
138 memset(D, 0, SHA256_DIGEST_SIZE); 138 memzero_explicit(D, SHA256_DIGEST_SIZE);
139 139
140 return 0; 140 return 0;
141} 141}
@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
237MODULE_LICENSE("GPL"); 237MODULE_LICENSE("GPL");
238MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated"); 238MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
239 239
240MODULE_ALIAS("sha224"); 240MODULE_ALIAS_CRYPTO("sha224");
241MODULE_ALIAS("sha256"); 241MODULE_ALIAS_CRYPTO("sha256");
242 242
243#include "crop_devid.c" 243#include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index f04d1994d19a..11eb36c3fc8c 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -139,7 +139,7 @@ static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash)
139 sha512_sparc64_final(desc, D); 139 sha512_sparc64_final(desc, D);
140 140
141 memcpy(hash, D, 48); 141 memcpy(hash, D, 48);
142 memset(D, 0, 64); 142 memzero_explicit(D, 64);
143 143
144 return 0; 144 return 0;
145} 145}
@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
222MODULE_LICENSE("GPL"); 222MODULE_LICENSE("GPL");
223MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated"); 223MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
224 224
225MODULE_ALIAS("sha384"); 225MODULE_ALIAS_CRYPTO("sha384");
226MODULE_ALIAS("sha512"); 226MODULE_ALIAS_CRYPTO("sha512");
227 227
228#include "crop_devid.c" 228#include "crop_devid.c"
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
index aafe8ce0d65d..e26984f7ab8d 100644
--- a/arch/x86/crypto/aes_glue.c
+++ b/arch/x86/crypto/aes_glue.c
@@ -66,5 +66,5 @@ module_exit(aes_fini);
66 66
67MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized"); 67MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
68MODULE_LICENSE("GPL"); 68MODULE_LICENSE("GPL");
69MODULE_ALIAS("aes"); 69MODULE_ALIAS_CRYPTO("aes");
70MODULE_ALIAS("aes-asm"); 70MODULE_ALIAS_CRYPTO("aes-asm");
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 888950f29fd9..ae855f4f64b7 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -43,10 +43,6 @@
43#include <asm/crypto/glue_helper.h> 43#include <asm/crypto/glue_helper.h>
44#endif 44#endif
45 45
46#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
47#define HAS_PCBC
48#endif
49
50/* This data is stored at the end of the crypto_tfm struct. 46/* This data is stored at the end of the crypto_tfm struct.
51 * It's a type of per "session" data storage location. 47 * It's a type of per "session" data storage location.
52 * This needs to be 16 byte aligned. 48 * This needs to be 16 byte aligned.
@@ -547,7 +543,7 @@ static int ablk_ctr_init(struct crypto_tfm *tfm)
547 543
548#endif 544#endif
549 545
550#ifdef HAS_PCBC 546#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
551static int ablk_pcbc_init(struct crypto_tfm *tfm) 547static int ablk_pcbc_init(struct crypto_tfm *tfm)
552{ 548{
553 return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))"); 549 return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
@@ -1377,7 +1373,7 @@ static struct crypto_alg aesni_algs[] = { {
1377 }, 1373 },
1378 }, 1374 },
1379#endif 1375#endif
1380#ifdef HAS_PCBC 1376#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
1381}, { 1377}, {
1382 .cra_name = "pcbc(aes)", 1378 .cra_name = "pcbc(aes)",
1383 .cra_driver_name = "pcbc-aes-aesni", 1379 .cra_driver_name = "pcbc-aes-aesni",
@@ -1550,4 +1546,4 @@ module_exit(aesni_exit);
1550 1546
1551MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); 1547MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1552MODULE_LICENSE("GPL"); 1548MODULE_LICENSE("GPL");
1553MODULE_ALIAS("aes"); 1549MODULE_ALIAS_CRYPTO("aes");
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index 8af519ed73d1..17c05531dfd1 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -478,5 +478,5 @@ module_exit(fini);
478 478
479MODULE_LICENSE("GPL"); 479MODULE_LICENSE("GPL");
480MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized"); 480MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
481MODULE_ALIAS("blowfish"); 481MODULE_ALIAS_CRYPTO("blowfish");
482MODULE_ALIAS("blowfish-asm"); 482MODULE_ALIAS_CRYPTO("blowfish-asm");
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 4209a76fcdaa..9a07fafe3831 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
582 582
583MODULE_LICENSE("GPL"); 583MODULE_LICENSE("GPL");
584MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized"); 584MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
585MODULE_ALIAS("camellia"); 585MODULE_ALIAS_CRYPTO("camellia");
586MODULE_ALIAS("camellia-asm"); 586MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 87a041a10f4a..ed38d959add6 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
574 574
575MODULE_LICENSE("GPL"); 575MODULE_LICENSE("GPL");
576MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized"); 576MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
577MODULE_ALIAS("camellia"); 577MODULE_ALIAS_CRYPTO("camellia");
578MODULE_ALIAS("camellia-asm"); 578MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index c171dcbf192d..5c8b6266a394 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1725,5 +1725,5 @@ module_exit(fini);
1725 1725
1726MODULE_LICENSE("GPL"); 1726MODULE_LICENSE("GPL");
1727MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized"); 1727MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
1728MODULE_ALIAS("camellia"); 1728MODULE_ALIAS_CRYPTO("camellia");
1729MODULE_ALIAS("camellia-asm"); 1729MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index e57e20ab5e0b..60ada677a928 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -491,4 +491,4 @@ module_exit(cast5_exit);
491 491
492MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized"); 492MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
493MODULE_LICENSE("GPL"); 493MODULE_LICENSE("GPL");
494MODULE_ALIAS("cast5"); 494MODULE_ALIAS_CRYPTO("cast5");
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 09f3677393e4..0160f68a57ff 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -611,4 +611,4 @@ module_exit(cast6_exit);
611 611
612MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized"); 612MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
613MODULE_LICENSE("GPL"); 613MODULE_LICENSE("GPL");
614MODULE_ALIAS("cast6"); 614MODULE_ALIAS_CRYPTO("cast6");
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 9d014a74ef96..1937fc1d8763 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
197MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); 197MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
198MODULE_LICENSE("GPL"); 198MODULE_LICENSE("GPL");
199 199
200MODULE_ALIAS("crc32"); 200MODULE_ALIAS_CRYPTO("crc32");
201MODULE_ALIAS("crc32-pclmul"); 201MODULE_ALIAS_CRYPTO("crc32-pclmul");
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 6812ad98355c..28640c3d6af7 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.c
280MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware."); 280MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
281MODULE_LICENSE("GPL"); 281MODULE_LICENSE("GPL");
282 282
283MODULE_ALIAS("crc32c"); 283MODULE_ALIAS_CRYPTO("crc32c");
284MODULE_ALIAS("crc32c-intel"); 284MODULE_ALIAS_CRYPTO("crc32c-intel");
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index 7845d7fd54c0..b6c67bf30fdf 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -147,5 +147,5 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
147MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ."); 147MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
148MODULE_LICENSE("GPL"); 148MODULE_LICENSE("GPL");
149 149
150MODULE_ALIAS("crct10dif"); 150MODULE_ALIAS_CRYPTO("crct10dif");
151MODULE_ALIAS("crct10dif-pclmul"); 151MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 0e9c0668fe4e..38a14f818ef1 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -502,8 +502,8 @@ module_exit(des3_ede_x86_fini);
502 502
503MODULE_LICENSE("GPL"); 503MODULE_LICENSE("GPL");
504MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); 504MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
505MODULE_ALIAS("des3_ede"); 505MODULE_ALIAS_CRYPTO("des3_ede");
506MODULE_ALIAS("des3_ede-asm"); 506MODULE_ALIAS_CRYPTO("des3_ede-asm");
507MODULE_ALIAS("des"); 507MODULE_ALIAS_CRYPTO("des");
508MODULE_ALIAS("des-asm"); 508MODULE_ALIAS_CRYPTO("des-asm");
509MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>"); 509MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 98d7a188f46b..f368ba261739 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/crypto.h>
20#include <asm/i387.h> 21#include <asm/i387.h>
21 22
22struct crypto_fpu_ctx { 23struct crypto_fpu_ctx {
@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
159{ 160{
160 crypto_unregister_template(&crypto_fpu_tmpl); 161 crypto_unregister_template(&crypto_fpu_tmpl);
161} 162}
163
164MODULE_ALIAS_CRYPTO("fpu");
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 88bb7ba8b175..8253d85aa165 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
341MODULE_LICENSE("GPL"); 341MODULE_LICENSE("GPL");
342MODULE_DESCRIPTION("GHASH Message Digest Algorithm, " 342MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
343 "acclerated by PCLMULQDQ-NI"); 343 "acclerated by PCLMULQDQ-NI");
344MODULE_ALIAS("ghash"); 344MODULE_ALIAS_CRYPTO("ghash");
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index 5e8e67739bb5..399a29d067d6 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -119,5 +119,5 @@ module_exit(fini);
119 119
120MODULE_LICENSE("GPL"); 120MODULE_LICENSE("GPL");
121MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); 121MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
122MODULE_ALIAS("salsa20"); 122MODULE_ALIAS_CRYPTO("salsa20");
123MODULE_ALIAS("salsa20-asm"); 123MODULE_ALIAS_CRYPTO("salsa20-asm");
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 2fae489b1524..437e47a4d302 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -558,5 +558,5 @@ module_exit(fini);
558 558
559MODULE_LICENSE("GPL"); 559MODULE_LICENSE("GPL");
560MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized"); 560MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
561MODULE_ALIAS("serpent"); 561MODULE_ALIAS_CRYPTO("serpent");
562MODULE_ALIAS("serpent-asm"); 562MODULE_ALIAS_CRYPTO("serpent-asm");
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index ff4870870972..7e217398b4eb 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -617,4 +617,4 @@ module_exit(serpent_exit);
617 617
618MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized"); 618MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
619MODULE_LICENSE("GPL"); 619MODULE_LICENSE("GPL");
620MODULE_ALIAS("serpent"); 620MODULE_ALIAS_CRYPTO("serpent");
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 8c95f8637306..bf025adaea01 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
618 618
619MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized"); 619MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
620MODULE_LICENSE("GPL"); 620MODULE_LICENSE("GPL");
621MODULE_ALIAS("serpent"); 621MODULE_ALIAS_CRYPTO("serpent");
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 99eefd812958..a225a5ca1037 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -204,8 +204,7 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
204 continue; 204 continue;
205 } 205 }
206 206
207 if (ctx) 207 ctx->status = HASH_CTX_STS_IDLE;
208 ctx->status = HASH_CTX_STS_IDLE;
209 return ctx; 208 return ctx;
210 } 209 }
211 210
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 74d16ef707c7..6c20fe04a738 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -278,4 +278,4 @@ module_exit(sha1_ssse3_mod_fini);
278MODULE_LICENSE("GPL"); 278MODULE_LICENSE("GPL");
279MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 279MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
280 280
281MODULE_ALIAS("sha1"); 281MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index f248546da1ca..8fad72f4dfd2 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -211,7 +211,7 @@ static int sha224_ssse3_final(struct shash_desc *desc, u8 *hash)
211 sha256_ssse3_final(desc, D); 211 sha256_ssse3_final(desc, D);
212 212
213 memcpy(hash, D, SHA224_DIGEST_SIZE); 213 memcpy(hash, D, SHA224_DIGEST_SIZE);
214 memset(D, 0, SHA256_DIGEST_SIZE); 214 memzero_explicit(D, SHA256_DIGEST_SIZE);
215 215
216 return 0; 216 return 0;
217} 217}
@@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini);
318MODULE_LICENSE("GPL"); 318MODULE_LICENSE("GPL");
319MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 319MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
320 320
321MODULE_ALIAS("sha256"); 321MODULE_ALIAS_CRYPTO("sha256");
322MODULE_ALIAS("sha224"); 322MODULE_ALIAS_CRYPTO("sha224");
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 8626b03e83b7..0b6af26832bf 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -219,7 +219,7 @@ static int sha384_ssse3_final(struct shash_desc *desc, u8 *hash)
219 sha512_ssse3_final(desc, D); 219 sha512_ssse3_final(desc, D);
220 220
221 memcpy(hash, D, SHA384_DIGEST_SIZE); 221 memcpy(hash, D, SHA384_DIGEST_SIZE);
222 memset(D, 0, SHA512_DIGEST_SIZE); 222 memzero_explicit(D, SHA512_DIGEST_SIZE);
223 223
224 return 0; 224 return 0;
225} 225}
@@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini);
326MODULE_LICENSE("GPL"); 326MODULE_LICENSE("GPL");
327MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 327MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
328 328
329MODULE_ALIAS("sha512"); 329MODULE_ALIAS_CRYPTO("sha512");
330MODULE_ALIAS("sha384"); 330MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 4e3c665be129..1ac531ea9bcc 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -579,4 +579,4 @@ module_exit(twofish_exit);
579 579
580MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized"); 580MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
581MODULE_LICENSE("GPL"); 581MODULE_LICENSE("GPL");
582MODULE_ALIAS("twofish"); 582MODULE_ALIAS_CRYPTO("twofish");
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 0a5202303501..77e06c2da83d 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -96,5 +96,5 @@ module_exit(fini);
96 96
97MODULE_LICENSE("GPL"); 97MODULE_LICENSE("GPL");
98MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized"); 98MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
99MODULE_ALIAS("twofish"); 99MODULE_ALIAS_CRYPTO("twofish");
100MODULE_ALIAS("twofish-asm"); 100MODULE_ALIAS_CRYPTO("twofish-asm");
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 13e63b3e1dfb..56d8a08ee479 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -495,5 +495,5 @@ module_exit(fini);
495 495
496MODULE_LICENSE("GPL"); 496MODULE_LICENSE("GPL");
497MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized"); 497MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
498MODULE_ALIAS("twofish"); 498MODULE_ALIAS_CRYPTO("twofish");
499MODULE_ALIAS("twofish-asm"); 499MODULE_ALIAS_CRYPTO("twofish-asm");
diff --git a/crypto/842.c b/crypto/842.c
index 65c7a89cfa09..b48f4f108c47 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
180 180
181MODULE_LICENSE("GPL"); 181MODULE_LICENSE("GPL");
182MODULE_DESCRIPTION("842 Compression Algorithm"); 182MODULE_DESCRIPTION("842 Compression Algorithm");
183MODULE_ALIAS_CRYPTO("842");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index fd0d6b454975..9b3c54c1cbe8 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1474,4 +1474,4 @@ module_exit(aes_fini);
1474 1474
1475MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); 1475MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1476MODULE_LICENSE("Dual BSD/GPL"); 1476MODULE_LICENSE("Dual BSD/GPL");
1477MODULE_ALIAS("aes"); 1477MODULE_ALIAS_CRYPTO("aes");
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index bc21f520d489..1fa7bc31be63 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -421,6 +421,12 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
421 con->op = *(u32 *)CMSG_DATA(cmsg); 421 con->op = *(u32 *)CMSG_DATA(cmsg);
422 break; 422 break;
423 423
424 case ALG_SET_AEAD_ASSOCLEN:
425 if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
426 return -EINVAL;
427 con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg);
428 break;
429
424 default: 430 default:
425 return -EINVAL; 431 return -EINVAL;
426 } 432 }
diff --git a/crypto/algapi.c b/crypto/algapi.c
index e8d3a7dca8c4..71a8143e23b1 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -509,8 +509,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
509 509
510struct crypto_template *crypto_lookup_template(const char *name) 510struct crypto_template *crypto_lookup_template(const char *name)
511{ 511{
512 return try_then_request_module(__crypto_lookup_template(name), "%s", 512 return try_then_request_module(__crypto_lookup_template(name),
513 name); 513 "crypto-%s", name);
514} 514}
515EXPORT_SYMBOL_GPL(crypto_lookup_template); 515EXPORT_SYMBOL_GPL(crypto_lookup_template);
516 516
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 83cd2cc49c9f..01f56eb7816e 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -258,8 +258,8 @@ static void hash_sock_destruct(struct sock *sk)
258 struct alg_sock *ask = alg_sk(sk); 258 struct alg_sock *ask = alg_sk(sk);
259 struct hash_ctx *ctx = ask->private; 259 struct hash_ctx *ctx = ask->private;
260 260
261 sock_kfree_s(sk, ctx->result, 261 sock_kzfree_s(sk, ctx->result,
262 crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req))); 262 crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
263 sock_kfree_s(sk, ctx, ctx->len); 263 sock_kfree_s(sk, ctx, ctx->len);
264 af_alg_release_parent(sk); 264 af_alg_release_parent(sk);
265} 265}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 4f45dab24648..c12207c8dde9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -251,6 +251,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
251 struct af_alg_control con = {}; 251 struct af_alg_control con = {};
252 long copied = 0; 252 long copied = 0;
253 bool enc = 0; 253 bool enc = 0;
254 bool init = 0;
254 int err; 255 int err;
255 int i; 256 int i;
256 257
@@ -259,6 +260,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
259 if (err) 260 if (err)
260 return err; 261 return err;
261 262
263 init = 1;
262 switch (con.op) { 264 switch (con.op) {
263 case ALG_OP_ENCRYPT: 265 case ALG_OP_ENCRYPT:
264 enc = 1; 266 enc = 1;
@@ -280,7 +282,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
280 if (!ctx->more && ctx->used) 282 if (!ctx->more && ctx->used)
281 goto unlock; 283 goto unlock;
282 284
283 if (!ctx->used) { 285 if (init) {
284 ctx->enc = enc; 286 ctx->enc = enc;
285 if (con.iv) 287 if (con.iv)
286 memcpy(ctx->iv, con.iv->iv, ivsize); 288 memcpy(ctx->iv, con.iv->iv, ivsize);
@@ -359,8 +361,6 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
359 err = 0; 361 err = 0;
360 362
361 ctx->more = msg->msg_flags & MSG_MORE; 363 ctx->more = msg->msg_flags & MSG_MORE;
362 if (!ctx->more && !list_empty(&ctx->tsgl))
363 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
364 364
365unlock: 365unlock:
366 skcipher_data_wakeup(sk); 366 skcipher_data_wakeup(sk);
@@ -408,8 +408,6 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
408 408
409done: 409done:
410 ctx->more = flags & MSG_MORE; 410 ctx->more = flags & MSG_MORE;
411 if (!ctx->more && !list_empty(&ctx->tsgl))
412 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
413 411
414unlock: 412unlock:
415 skcipher_data_wakeup(sk); 413 skcipher_data_wakeup(sk);
@@ -448,14 +446,13 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
448 while (!sg->length) 446 while (!sg->length)
449 sg++; 447 sg++;
450 448
451 used = ctx->used; 449 if (!ctx->used) {
452 if (!used) {
453 err = skcipher_wait_for_data(sk, flags); 450 err = skcipher_wait_for_data(sk, flags);
454 if (err) 451 if (err)
455 goto unlock; 452 goto unlock;
456 } 453 }
457 454
458 used = min_t(unsigned long, used, seglen); 455 used = min_t(unsigned long, ctx->used, seglen);
459 456
460 used = af_alg_make_sg(&ctx->rsgl, from, used, 1); 457 used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
461 err = used; 458 err = used;
@@ -566,7 +563,7 @@ static void skcipher_sock_destruct(struct sock *sk)
566 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); 563 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
567 564
568 skcipher_free_sgl(sk); 565 skcipher_free_sgl(sk);
569 sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm)); 566 sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
570 sock_kfree_s(sk, ctx, ctx->len); 567 sock_kfree_s(sk, ctx, ctx->len);
571 af_alg_release_parent(sk); 568 af_alg_release_parent(sk);
572} 569}
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 666f1962a160..b4485a108389 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -476,4 +476,4 @@ module_param(dbg, int, 0);
476MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); 476MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
477module_init(prng_mod_init); 477module_init(prng_mod_init);
478module_exit(prng_mod_fini); 478module_exit(prng_mod_fini);
479MODULE_ALIAS("stdrng"); 479MODULE_ALIAS_CRYPTO("stdrng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 008c8a4fb67c..4bb187c2a902 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
704 704
705MODULE_LICENSE("GPL"); 705MODULE_LICENSE("GPL");
706MODULE_DESCRIPTION("Anubis Cryptographic Algorithm"); 706MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
707MODULE_ALIAS_CRYPTO("anubis");
diff --git a/crypto/api.c b/crypto/api.c
index a2b39c5f3649..2a81e98a0021 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
216 216
217 alg = crypto_alg_lookup(name, type, mask); 217 alg = crypto_alg_lookup(name, type, mask);
218 if (!alg) { 218 if (!alg) {
219 request_module("%s", name); 219 request_module("crypto-%s", name);
220 220
221 if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & 221 if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
222 CRYPTO_ALG_NEED_FALLBACK)) 222 CRYPTO_ALG_NEED_FALLBACK))
223 request_module("%s-all", name); 223 request_module("crypto-%s-all", name);
224 224
225 alg = crypto_alg_lookup(name, type, mask); 225 alg = crypto_alg_lookup(name, type, mask);
226 } 226 }
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 5a772c3657d5..f1a81925558f 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -166,3 +166,4 @@ module_exit(arc4_exit);
166MODULE_LICENSE("GPL"); 166MODULE_LICENSE("GPL");
167MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); 167MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
168MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>"); 168MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
169MODULE_ALIAS_CRYPTO("arc4");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index e1223559d5df..78fb16cab13f 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -721,3 +721,4 @@ module_exit(crypto_authenc_module_exit);
721 721
722MODULE_LICENSE("GPL"); 722MODULE_LICENSE("GPL");
723MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec"); 723MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
724MODULE_ALIAS_CRYPTO("authenc");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 4be0dd4373a9..024bff2344fc 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -814,3 +814,4 @@ module_exit(crypto_authenc_esn_module_exit);
814MODULE_LICENSE("GPL"); 814MODULE_LICENSE("GPL");
815MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); 815MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
816MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers"); 816MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
817MODULE_ALIAS_CRYPTO("authencesn");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 8baf5447d35b..7bd71f02d0dd 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -138,4 +138,4 @@ module_exit(blowfish_mod_fini);
138 138
139MODULE_LICENSE("GPL"); 139MODULE_LICENSE("GPL");
140MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); 140MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
141MODULE_ALIAS("blowfish"); 141MODULE_ALIAS_CRYPTO("blowfish");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 26bcd7a2d6b4..1b74c5a3e891 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1098,4 +1098,4 @@ module_exit(camellia_fini);
1098 1098
1099MODULE_DESCRIPTION("Camellia Cipher Algorithm"); 1099MODULE_DESCRIPTION("Camellia Cipher Algorithm");
1100MODULE_LICENSE("GPL"); 1100MODULE_LICENSE("GPL");
1101MODULE_ALIAS("camellia"); 1101MODULE_ALIAS_CRYPTO("camellia");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 5558f630a0eb..84c86db67ec7 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -549,4 +549,4 @@ module_exit(cast5_mod_fini);
549 549
550MODULE_LICENSE("GPL"); 550MODULE_LICENSE("GPL");
551MODULE_DESCRIPTION("Cast5 Cipher Algorithm"); 551MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
552MODULE_ALIAS("cast5"); 552MODULE_ALIAS_CRYPTO("cast5");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index de732528a430..f408f0bd8de2 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -291,4 +291,4 @@ module_exit(cast6_mod_fini);
291 291
292MODULE_LICENSE("GPL"); 292MODULE_LICENSE("GPL");
293MODULE_DESCRIPTION("Cast6 Cipher Algorithm"); 293MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
294MODULE_ALIAS("cast6"); 294MODULE_ALIAS_CRYPTO("cast6");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 61ac42e1e32b..780ee27b2d43 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
289 289
290MODULE_LICENSE("GPL"); 290MODULE_LICENSE("GPL");
291MODULE_DESCRIPTION("CBC block cipher algorithm"); 291MODULE_DESCRIPTION("CBC block cipher algorithm");
292MODULE_ALIAS_CRYPTO("cbc");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 1df84217f7c9..003bbbd21a2b 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
879 879
880MODULE_LICENSE("GPL"); 880MODULE_LICENSE("GPL");
881MODULE_DESCRIPTION("Counter with CBC MAC"); 881MODULE_DESCRIPTION("Counter with CBC MAC");
882MODULE_ALIAS("ccm_base"); 882MODULE_ALIAS_CRYPTO("ccm_base");
883MODULE_ALIAS("rfc4309"); 883MODULE_ALIAS_CRYPTO("rfc4309");
884MODULE_ALIAS_CRYPTO("ccm");
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 9c294c8f9a07..63c17d5992f7 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
359 359
360MODULE_LICENSE("GPL"); 360MODULE_LICENSE("GPL");
361MODULE_DESCRIPTION("Chain IV Generator"); 361MODULE_DESCRIPTION("Chain IV Generator");
362MODULE_ALIAS_CRYPTO("chainiv");
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 50880cf17fad..7a8bfbd548f6 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
313 313
314MODULE_LICENSE("GPL"); 314MODULE_LICENSE("GPL");
315MODULE_DESCRIPTION("CMAC keyed hash algorithm"); 315MODULE_DESCRIPTION("CMAC keyed hash algorithm");
316MODULE_ALIAS_CRYPTO("cmac");
diff --git a/crypto/crc32.c b/crypto/crc32.c
index 9d1c41569898..187ded28cb0b 100644
--- a/crypto/crc32.c
+++ b/crypto/crc32.c
@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
156MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); 156MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
157MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); 157MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
158MODULE_LICENSE("GPL"); 158MODULE_LICENSE("GPL");
159MODULE_ALIAS_CRYPTO("crc32");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index d9c7beba8e50..2a062025749d 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -170,5 +170,5 @@ module_exit(crc32c_mod_fini);
170MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); 170MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
171MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); 171MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
172MODULE_LICENSE("GPL"); 172MODULE_LICENSE("GPL");
173MODULE_ALIAS("crc32c"); 173MODULE_ALIAS_CRYPTO("crc32c");
174MODULE_SOFTDEP("pre: crc32c"); 174MODULE_SOFTDEP("pre: crc32c");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 877e7114ec5c..08bb4f504520 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -124,4 +124,4 @@ module_exit(crct10dif_mod_fini);
124MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); 124MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
125MODULE_DESCRIPTION("T10 DIF CRC calculation."); 125MODULE_DESCRIPTION("T10 DIF CRC calculation.");
126MODULE_LICENSE("GPL"); 126MODULE_LICENSE("GPL");
127MODULE_ALIAS("crct10dif"); 127MODULE_ALIAS_CRYPTO("crct10dif");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index e592c90abebb..650afac10fd7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
955 955
956MODULE_LICENSE("GPL"); 956MODULE_LICENSE("GPL");
957MODULE_DESCRIPTION("Software async crypto daemon"); 957MODULE_DESCRIPTION("Software async crypto daemon");
958MODULE_ALIAS_CRYPTO("cryptd");
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 1dc54bb95a87..a20319132e33 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -145,9 +145,9 @@ static struct crypto_alg null_algs[3] = { {
145 .coa_decompress = null_compress } } 145 .coa_decompress = null_compress } }
146} }; 146} };
147 147
148MODULE_ALIAS("compress_null"); 148MODULE_ALIAS_CRYPTO("compress_null");
149MODULE_ALIAS("digest_null"); 149MODULE_ALIAS_CRYPTO("digest_null");
150MODULE_ALIAS("cipher_null"); 150MODULE_ALIAS_CRYPTO("cipher_null");
151 151
152static int __init crypto_null_mod_init(void) 152static int __init crypto_null_mod_init(void)
153{ 153{
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index e2a34feec7a4..c5148a35ae0a 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -201,10 +201,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
201 if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) 201 if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
202 return -EINVAL; 202 return -EINVAL;
203 203
204 if (!p->cru_driver_name[0]) 204 alg = crypto_alg_match(p, 0);
205 return -EINVAL;
206
207 alg = crypto_alg_match(p, 1);
208 if (!alg) 205 if (!alg)
209 return -ENOENT; 206 return -ENOENT;
210 207
@@ -537,3 +534,4 @@ module_exit(crypto_user_exit);
537MODULE_LICENSE("GPL"); 534MODULE_LICENSE("GPL");
538MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); 535MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
539MODULE_DESCRIPTION("Crypto userspace configuration API"); 536MODULE_DESCRIPTION("Crypto userspace configuration API");
537MODULE_ALIAS("net-pf-16-proto-21");
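
Dropping the mandatory driver name means a CRYPTO_MSG_GETALG request may now carry only cru_name, with crypto_alg_match() asked for a non-exact match. A rough userspace sketch of such a query over NETLINK_CRYPTO (error handling trimmed; the socket plumbing here is only illustrative, field names come from <linux/cryptouser.h>):

#include <linux/netlink.h>
#include <linux/cryptouser.h>
#include <sys/socket.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr n;
		struct crypto_user_alg alg;
	} req;
	char reply[4096];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);

	memset(&req, 0, sizeof(req));
	req.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.alg));
	req.n.nlmsg_type  = CRYPTO_MSG_GETALG;
	req.n.nlmsg_flags = NLM_F_REQUEST;
	/* algorithm name only -- cru_driver_name may now stay empty */
	strncpy(req.alg.cru_name, "sha1", sizeof(req.alg.cru_name) - 1);

	sendto(fd, &req, req.n.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	recv(fd, reply, sizeof(reply), 0);  /* crypto_user_alg answer or nlmsgerr */
	close(fd);
	return 0;
}

The new MODULE_ALIAS("net-pf-16-proto-21") lets the kernel autoload crypto_user when such a NETLINK_CRYPTO (protocol 21) socket is first opened.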
diff --git a/crypto/ctr.c b/crypto/ctr.c
index f2b94f27bb2c..2386f7313952 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
466 466
467MODULE_LICENSE("GPL"); 467MODULE_LICENSE("GPL");
468MODULE_DESCRIPTION("CTR Counter block mode"); 468MODULE_DESCRIPTION("CTR Counter block mode");
469MODULE_ALIAS("rfc3686"); 469MODULE_ALIAS_CRYPTO("rfc3686");
470MODULE_ALIAS_CRYPTO("ctr");
diff --git a/crypto/cts.c b/crypto/cts.c
index 133f0874c95e..bd9405820e8a 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -351,3 +351,4 @@ module_exit(crypto_cts_module_exit);
351 351
352MODULE_LICENSE("Dual BSD/GPL"); 352MODULE_LICENSE("Dual BSD/GPL");
353MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC"); 353MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
354MODULE_ALIAS_CRYPTO("cts");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index b57d70eb156b..95d8d37c5021 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
222MODULE_LICENSE("GPL"); 222MODULE_LICENSE("GPL");
223MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP"); 223MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
224MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); 224MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
225 225MODULE_ALIAS_CRYPTO("deflate");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 298d464ab7d2..42912948776b 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -983,7 +983,7 @@ static struct crypto_alg des_algs[2] = { {
983 .cia_decrypt = des3_ede_decrypt } } 983 .cia_decrypt = des3_ede_decrypt } }
984} }; 984} };
985 985
986MODULE_ALIAS("des3_ede"); 986MODULE_ALIAS_CRYPTO("des3_ede");
987 987
988static int __init des_generic_mod_init(void) 988static int __init des_generic_mod_init(void)
989{ 989{
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 54cfd4820abc..d748a1d0ca24 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,6 +98,7 @@
98 */ 98 */
99 99
100#include <crypto/drbg.h> 100#include <crypto/drbg.h>
101#include <linux/string.h>
101 102
102/*************************************************************** 103/***************************************************************
103 * Backend cipher definitions available to DRBG 104 * Backend cipher definitions available to DRBG
@@ -283,38 +284,6 @@ static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
283 284
284 conversion->conv = cpu_to_be32(val); 285 conversion->conv = cpu_to_be32(val);
285} 286}
286
287/*
288 * Increment buffer
289 *
290 * @dst buffer to increment
291 * @add value to add
292 */
293static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
294 const unsigned char *add, size_t addlen)
295{
296 /* implied: dstlen > addlen */
297 unsigned char *dstptr;
298 const unsigned char *addptr;
299 unsigned int remainder = 0;
300 size_t len = addlen;
301
302 dstptr = dst + (dstlen-1);
303 addptr = add + (addlen-1);
304 while (len) {
305 remainder += *dstptr + *addptr;
306 *dstptr = remainder & 0xff;
307 remainder >>= 8;
308 len--; dstptr--; addptr--;
309 }
310 len = dstlen - addlen;
311 while (len && remainder > 0) {
312 remainder = *dstptr + 1;
313 *dstptr = remainder & 0xff;
314 remainder >>= 8;
315 len--; dstptr--;
316 }
317}
318#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */ 287#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
319 288
320/****************************************************************** 289/******************************************************************
@@ -323,6 +292,13 @@ static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
323 292
324#ifdef CONFIG_CRYPTO_DRBG_CTR 293#ifdef CONFIG_CRYPTO_DRBG_CTR
325#define CRYPTO_DRBG_CTR_STRING "CTR " 294#define CRYPTO_DRBG_CTR_STRING "CTR "
295MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes256");
296MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes256");
297MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes192");
298MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
299MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
300MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
301
326static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, 302static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
327 unsigned char *outval, const struct drbg_string *in); 303 unsigned char *outval, const struct drbg_string *in);
328static int drbg_init_sym_kernel(struct drbg_state *drbg); 304static int drbg_init_sym_kernel(struct drbg_state *drbg);
@@ -522,9 +498,9 @@ static int drbg_ctr_df(struct drbg_state *drbg,
522 ret = 0; 498 ret = 0;
523 499
524out: 500out:
525 memset(iv, 0, drbg_blocklen(drbg)); 501 memzero_explicit(iv, drbg_blocklen(drbg));
526 memset(temp, 0, drbg_statelen(drbg)); 502 memzero_explicit(temp, drbg_statelen(drbg));
527 memset(pad, 0, drbg_blocklen(drbg)); 503 memzero_explicit(pad, drbg_blocklen(drbg));
528 return ret; 504 return ret;
529} 505}
530 506
@@ -554,7 +530,6 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
554 unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */ 530 unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
555 unsigned int len = 0; 531 unsigned int len = 0;
556 struct drbg_string cipherin; 532 struct drbg_string cipherin;
557 unsigned char prefix = DRBG_PREFIX1;
558 533
559 memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); 534 memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
560 if (3 > reseed) 535 if (3 > reseed)
@@ -574,7 +549,7 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
574 */ 549 */
575 while (len < (drbg_statelen(drbg))) { 550 while (len < (drbg_statelen(drbg))) {
576 /* 10.2.1.2 step 2.1 */ 551 /* 10.2.1.2 step 2.1 */
577 drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1); 552 crypto_inc(drbg->V, drbg_blocklen(drbg));
578 /* 553 /*
579 * 10.2.1.2 step 2.2 */ 554 * 10.2.1.2 step 2.2 */
580 ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin); 555 ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
@@ -599,9 +574,9 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
599 ret = 0; 574 ret = 0;
600 575
601out: 576out:
602 memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); 577 memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg));
603 if (2 != reseed) 578 if (2 != reseed)
604 memset(df_data, 0, drbg_statelen(drbg)); 579 memzero_explicit(df_data, drbg_statelen(drbg));
605 return ret; 580 return ret;
606} 581}
607 582
@@ -617,7 +592,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
617 int len = 0; 592 int len = 0;
618 int ret = 0; 593 int ret = 0;
619 struct drbg_string data; 594 struct drbg_string data;
620 unsigned char prefix = DRBG_PREFIX1;
621 595
622 memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); 596 memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
623 597
@@ -629,7 +603,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
629 } 603 }
630 604
631 /* 10.2.1.5.2 step 4.1 */ 605 /* 10.2.1.5.2 step 4.1 */
632 drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1); 606 crypto_inc(drbg->V, drbg_blocklen(drbg));
633 drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg)); 607 drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg));
634 while (len < buflen) { 608 while (len < buflen) {
635 int outlen = 0; 609 int outlen = 0;
@@ -643,7 +617,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
643 drbg_blocklen(drbg) : (buflen - len); 617 drbg_blocklen(drbg) : (buflen - len);
644 if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) { 618 if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
645 /* 10.2.1.5.2 step 6 */ 619 /* 10.2.1.5.2 step 6 */
646 drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1); 620 crypto_inc(drbg->V, drbg_blocklen(drbg));
647 continue; 621 continue;
648 } 622 }
649 /* 10.2.1.5.2 step 4.3 */ 623 /* 10.2.1.5.2 step 4.3 */
@@ -651,7 +625,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
651 len += outlen; 625 len += outlen;
652 /* 10.2.1.5.2 step 6 */ 626 /* 10.2.1.5.2 step 6 */
653 if (len < buflen) 627 if (len < buflen)
654 drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1); 628 crypto_inc(drbg->V, drbg_blocklen(drbg));
655 } 629 }
656 630
657 /* 10.2.1.5.2 step 6 */ 631 /* 10.2.1.5.2 step 6 */
@@ -660,7 +634,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
660 len = ret; 634 len = ret;
661 635
662out: 636out:
663 memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); 637 memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
664 return len; 638 return len;
665} 639}
666 640
@@ -685,6 +659,15 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg);
685 659
686#ifdef CONFIG_CRYPTO_DRBG_HMAC 660#ifdef CONFIG_CRYPTO_DRBG_HMAC
687#define CRYPTO_DRBG_HMAC_STRING "HMAC " 661#define CRYPTO_DRBG_HMAC_STRING "HMAC "
662MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha512");
663MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha512");
664MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384");
665MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384");
666MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256");
667MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256");
668MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha1");
669MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha1");
670
688/* update function of HMAC DRBG as defined in 10.1.2.2 */ 671/* update function of HMAC DRBG as defined in 10.1.2.2 */
689static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed, 672static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
690 int reseed) 673 int reseed)
@@ -796,6 +779,47 @@ static struct drbg_state_ops drbg_hmac_ops = {
796 779
797#ifdef CONFIG_CRYPTO_DRBG_HASH 780#ifdef CONFIG_CRYPTO_DRBG_HASH
798#define CRYPTO_DRBG_HASH_STRING "HASH " 781#define CRYPTO_DRBG_HASH_STRING "HASH "
782MODULE_ALIAS_CRYPTO("drbg_pr_sha512");
783MODULE_ALIAS_CRYPTO("drbg_nopr_sha512");
784MODULE_ALIAS_CRYPTO("drbg_pr_sha384");
785MODULE_ALIAS_CRYPTO("drbg_nopr_sha384");
786MODULE_ALIAS_CRYPTO("drbg_pr_sha256");
787MODULE_ALIAS_CRYPTO("drbg_nopr_sha256");
788MODULE_ALIAS_CRYPTO("drbg_pr_sha1");
789MODULE_ALIAS_CRYPTO("drbg_nopr_sha1");
790
791/*
792 * Increment buffer
793 *
794 * @dst buffer to increment
795 * @add value to add
796 */
797static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
798 const unsigned char *add, size_t addlen)
799{
800 /* implied: dstlen > addlen */
801 unsigned char *dstptr;
802 const unsigned char *addptr;
803 unsigned int remainder = 0;
804 size_t len = addlen;
805
806 dstptr = dst + (dstlen-1);
807 addptr = add + (addlen-1);
808 while (len) {
809 remainder += *dstptr + *addptr;
810 *dstptr = remainder & 0xff;
811 remainder >>= 8;
812 len--; dstptr--; addptr--;
813 }
814 len = dstlen - addlen;
815 while (len && remainder > 0) {
816 remainder = *dstptr + 1;
817 *dstptr = remainder & 0xff;
818 remainder >>= 8;
819 len--; dstptr--;
820 }
821}
822
799/* 823/*
800 * scratchpad usage: as drbg_hash_update and drbg_hash_df are used 824 * scratchpad usage: as drbg_hash_update and drbg_hash_df are used
801 * interlinked, the scratchpad is used as follows: 825 * interlinked, the scratchpad is used as follows:
@@ -848,7 +872,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
848 } 872 }
849 873
850out: 874out:
851 memset(tmp, 0, drbg_blocklen(drbg)); 875 memzero_explicit(tmp, drbg_blocklen(drbg));
852 return ret; 876 return ret;
853} 877}
854 878
@@ -892,7 +916,7 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
892 ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2); 916 ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
893 917
894out: 918out:
895 memset(drbg->scratchpad, 0, drbg_statelen(drbg)); 919 memzero_explicit(drbg->scratchpad, drbg_statelen(drbg));
896 return ret; 920 return ret;
897} 921}
898 922
@@ -927,7 +951,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
927 drbg->scratchpad, drbg_blocklen(drbg)); 951 drbg->scratchpad, drbg_blocklen(drbg));
928 952
929out: 953out:
930 memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); 954 memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
931 return ret; 955 return ret;
932} 956}
933 957
@@ -942,7 +966,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
942 unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg); 966 unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
943 struct drbg_string data; 967 struct drbg_string data;
944 LIST_HEAD(datalist); 968 LIST_HEAD(datalist);
945 unsigned char prefix = DRBG_PREFIX1;
946 969
947 memset(src, 0, drbg_statelen(drbg)); 970 memset(src, 0, drbg_statelen(drbg));
948 memset(dst, 0, drbg_blocklen(drbg)); 971 memset(dst, 0, drbg_blocklen(drbg));
@@ -963,7 +986,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
963 outlen = (drbg_blocklen(drbg) < (buflen - len)) ? 986 outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
964 drbg_blocklen(drbg) : (buflen - len); 987 drbg_blocklen(drbg) : (buflen - len);
965 if (!drbg_fips_continuous_test(drbg, dst)) { 988 if (!drbg_fips_continuous_test(drbg, dst)) {
966 drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1); 989 crypto_inc(src, drbg_statelen(drbg));
967 continue; 990 continue;
968 } 991 }
969 /* 10.1.1.4 step hashgen 4.2 */ 992 /* 10.1.1.4 step hashgen 4.2 */
@@ -971,11 +994,11 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
971 len += outlen; 994 len += outlen;
972 /* 10.1.1.4 hashgen step 4.3 */ 995 /* 10.1.1.4 hashgen step 4.3 */
973 if (len < buflen) 996 if (len < buflen)
974 drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1); 997 crypto_inc(src, drbg_statelen(drbg));
975 } 998 }
976 999
977out: 1000out:
978 memset(drbg->scratchpad, 0, 1001 memzero_explicit(drbg->scratchpad,
979 (drbg_statelen(drbg) + drbg_blocklen(drbg))); 1002 (drbg_statelen(drbg) + drbg_blocklen(drbg)));
980 return len; 1003 return len;
981} 1004}
@@ -1024,7 +1047,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
1024 drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8); 1047 drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
1025 1048
1026out: 1049out:
1027 memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); 1050 memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
1028 return len; 1051 return len;
1029} 1052}
1030 1053
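
Two recurring patterns in this drbg rework: sensitive scratch buffers are now wiped with memzero_explicit(), which the compiler cannot elide the way it may a plain memset() on a buffer that is about to go out of scope, and the open-coded drbg_add_buf(..., &prefix, 1) increments become crypto_inc(), the generic big-endian counter increment from <crypto/algapi.h>. A small illustrative fragment with made-up names:

#include <linux/types.h>
#include <linux/string.h>	/* memzero_explicit() */
#include <crypto/algapi.h>	/* crypto_inc() */

static void example_round(u8 *V, unsigned int blocklen)
{
	u8 scratch[64];

	/* ... derive key material into scratch ... */

	/*
	 * A plain memset() here could legally be optimised away since
	 * scratch is dead afterwards; memzero_explicit() forces the wipe.
	 */
	memzero_explicit(scratch, sizeof(scratch));

	/* V is treated as a big-endian counter of blocklen bytes: V = V + 1 */
	crypto_inc(V, blocklen);
}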
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 935cfef4aa84..12011aff0971 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
185 185
186MODULE_LICENSE("GPL"); 186MODULE_LICENSE("GPL");
187MODULE_DESCRIPTION("ECB block cipher algorithm"); 187MODULE_DESCRIPTION("ECB block cipher algorithm");
188MODULE_ALIAS_CRYPTO("ecb");
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index bf7ab4a89493..f116fae766f8 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
267 267
268MODULE_LICENSE("GPL"); 268MODULE_LICENSE("GPL");
269MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator"); 269MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
270MODULE_ALIAS_CRYPTO("eseqiv");
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 021d7fec6bc8..77286ea28865 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
420MODULE_LICENSE("Dual BSD/GPL"); 420MODULE_LICENSE("Dual BSD/GPL");
421MODULE_DESCRIPTION("FCrypt Cipher Algorithm"); 421MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
422MODULE_AUTHOR("David Howells <dhowells@redhat.com>"); 422MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
423MODULE_ALIAS_CRYPTO("fcrypt");
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 276cdac567b6..2e403f6138c1 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
1441MODULE_LICENSE("GPL"); 1441MODULE_LICENSE("GPL");
1442MODULE_DESCRIPTION("Galois/Counter Mode"); 1442MODULE_DESCRIPTION("Galois/Counter Mode");
1443MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>"); 1443MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
1444MODULE_ALIAS("gcm_base"); 1444MODULE_ALIAS_CRYPTO("gcm_base");
1445MODULE_ALIAS("rfc4106"); 1445MODULE_ALIAS_CRYPTO("rfc4106");
1446MODULE_ALIAS("rfc4543"); 1446MODULE_ALIAS_CRYPTO("rfc4543");
1447MODULE_ALIAS_CRYPTO("gcm");
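
Templates such as gcm keep aliases both for the bare template name and for the common derived names, so the prefixed autoload in crypto_larval_lookup() can still pull in gcm.ko when a composite transform is requested. A hedged sketch of the consumer side (standard API calls, shown only to illustrate why the aliases matter):

#include <linux/crypto.h>
#include <linux/err.h>

static int try_rfc4106(void)
{
	/* If nothing matching is registered yet, the lookup falls back to
	 * request_module() with the "crypto-" prefix, which these aliases
	 * are there to satisfy. */
	struct crypto_aead *tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_free_aead(tfm);
	return 0;
}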
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 9d3f0c69a86f..4e97fae9666f 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -172,4 +172,4 @@ module_exit(ghash_mod_exit);
172 172
173MODULE_LICENSE("GPL"); 173MODULE_LICENSE("GPL");
174MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); 174MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
175MODULE_ALIAS("ghash"); 175MODULE_ALIAS_CRYPTO("ghash");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e392219ddc61..72e38c098bb3 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -268,3 +268,4 @@ module_exit(hmac_module_exit);
268 268
269MODULE_LICENSE("GPL"); 269MODULE_LICENSE("GPL");
270MODULE_DESCRIPTION("HMAC hash algorithm"); 270MODULE_DESCRIPTION("HMAC hash algorithm");
271MODULE_ALIAS_CRYPTO("hmac");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 60e7cd66facc..873eb5ded6d7 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
880 880
881MODULE_LICENSE("GPL"); 881MODULE_LICENSE("GPL");
882MODULE_DESCRIPTION("Khazad Cryptographic Algorithm"); 882MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
883MODULE_ALIAS_CRYPTO("khazad");
diff --git a/crypto/krng.c b/crypto/krng.c
index a2d2b72fc135..67c88b331210 100644
--- a/crypto/krng.c
+++ b/crypto/krng.c
@@ -62,4 +62,4 @@ module_exit(krng_mod_fini);
62 62
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64MODULE_DESCRIPTION("Kernel Random Number Generator"); 64MODULE_DESCRIPTION("Kernel Random Number Generator");
65MODULE_ALIAS("stdrng"); 65MODULE_ALIAS_CRYPTO("stdrng");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ba42acc4deba..6f9908a7ebcb 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
400 400
401MODULE_LICENSE("GPL"); 401MODULE_LICENSE("GPL");
402MODULE_DESCRIPTION("LRW block cipher mode"); 402MODULE_DESCRIPTION("LRW block cipher mode");
403MODULE_ALIAS_CRYPTO("lrw");
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 34d072b72a73..aefbceaf3104 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -104,3 +104,4 @@ module_exit(lz4_mod_fini);
104 104
105MODULE_LICENSE("GPL"); 105MODULE_LICENSE("GPL");
106MODULE_DESCRIPTION("LZ4 Compression Algorithm"); 106MODULE_DESCRIPTION("LZ4 Compression Algorithm");
107MODULE_ALIAS_CRYPTO("lz4");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 9218b3fed5e3..a1d3b5bd3d85 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -104,3 +104,4 @@ module_exit(lz4hc_mod_fini);
104 104
105MODULE_LICENSE("GPL"); 105MODULE_LICENSE("GPL");
106MODULE_DESCRIPTION("LZ4HC Compression Algorithm"); 106MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
107MODULE_ALIAS_CRYPTO("lz4hc");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index a8ff2f778dc4..4b3e92525dac 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -107,3 +107,4 @@ module_exit(lzo_mod_fini);
107 107
108MODULE_LICENSE("GPL"); 108MODULE_LICENSE("GPL");
109MODULE_DESCRIPTION("LZO Compression Algorithm"); 109MODULE_DESCRIPTION("LZO Compression Algorithm");
110MODULE_ALIAS_CRYPTO("lzo");
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index b39fbd530102..a8e870444ea9 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -703,3 +703,4 @@ module_exit(mcryptd_exit);
703 703
704MODULE_LICENSE("GPL"); 704MODULE_LICENSE("GPL");
705MODULE_DESCRIPTION("Software async multibuffer crypto daemon"); 705MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
706MODULE_ALIAS_CRYPTO("mcryptd");
diff --git a/crypto/md4.c b/crypto/md4.c
index 0477a6a01d58..3515af425cc9 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
255 255
256MODULE_LICENSE("GPL"); 256MODULE_LICENSE("GPL");
257MODULE_DESCRIPTION("MD4 Message Digest Algorithm"); 257MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
258 258MODULE_ALIAS_CRYPTO("md4");
diff --git a/crypto/md5.c b/crypto/md5.c
index 7febeaab923b..36f5e5b103f3 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
168 168
169MODULE_LICENSE("GPL"); 169MODULE_LICENSE("GPL");
170MODULE_DESCRIPTION("MD5 Message Digest Algorithm"); 170MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
171MODULE_ALIAS_CRYPTO("md5");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 079b761bc70d..46195e0d0f4d 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
184MODULE_LICENSE("GPL v2"); 184MODULE_LICENSE("GPL v2");
185MODULE_DESCRIPTION("Michael MIC"); 185MODULE_DESCRIPTION("Michael MIC");
186MODULE_AUTHOR("Jouni Malinen <j@w1.fi>"); 186MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
187MODULE_ALIAS_CRYPTO("michael_mic");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index d1b8bdfb5855..f654965f0933 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
295 295
296MODULE_LICENSE("GPL"); 296MODULE_LICENSE("GPL");
297MODULE_DESCRIPTION("PCBC block cipher algorithm"); 297MODULE_DESCRIPTION("PCBC block cipher algorithm");
298MODULE_ALIAS_CRYPTO("pcbc");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 309d345ead95..c305d4112735 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
565MODULE_LICENSE("GPL"); 565MODULE_LICENSE("GPL");
566MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); 566MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
567MODULE_DESCRIPTION("Parallel crypto wrapper"); 567MODULE_DESCRIPTION("Parallel crypto wrapper");
568MODULE_ALIAS_CRYPTO("pcrypt");
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 8a0f68b7f257..049486ede938 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
327MODULE_LICENSE("GPL"); 327MODULE_LICENSE("GPL");
328MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); 328MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
329MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); 329MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
330MODULE_ALIAS_CRYPTO("rmd128");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 525d7bb752cf..de585e51d455 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
371MODULE_LICENSE("GPL"); 371MODULE_LICENSE("GPL");
372MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); 372MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
373MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); 373MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
374MODULE_ALIAS_CRYPTO("rmd160");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 69293d9b56e0..4ec02a754e09 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
346MODULE_LICENSE("GPL"); 346MODULE_LICENSE("GPL");
347MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); 347MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
348MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); 348MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
349MODULE_ALIAS_CRYPTO("rmd256");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 09f97dfdfbba..770f2cb369f8 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
395MODULE_LICENSE("GPL"); 395MODULE_LICENSE("GPL");
396MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); 396MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
397MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); 397MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
398MODULE_ALIAS_CRYPTO("rmd320");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 9a4770c02284..3d0f9df30ac9 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -248,4 +248,4 @@ module_exit(salsa20_generic_mod_fini);
248 248
249MODULE_LICENSE("GPL"); 249MODULE_LICENSE("GPL");
250MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm"); 250MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
251MODULE_ALIAS("salsa20"); 251MODULE_ALIAS_CRYPTO("salsa20");
diff --git a/crypto/seed.c b/crypto/seed.c
index 9c904d6d2151..c6ba8438be43 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -476,3 +476,4 @@ module_exit(seed_fini);
476MODULE_DESCRIPTION("SEED Cipher Algorithm"); 476MODULE_DESCRIPTION("SEED Cipher Algorithm");
477MODULE_LICENSE("GPL"); 477MODULE_LICENSE("GPL");
478MODULE_AUTHOR("Hye-Shik Chang <perky@FreeBSD.org>, Kim Hyun <hkim@kisa.or.kr>"); 478MODULE_AUTHOR("Hye-Shik Chang <perky@FreeBSD.org>, Kim Hyun <hkim@kisa.or.kr>");
479MODULE_ALIAS_CRYPTO("seed");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index ee190fcedcd2..9daa854cc485 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
362 362
363MODULE_LICENSE("GPL"); 363MODULE_LICENSE("GPL");
364MODULE_DESCRIPTION("Sequence Number IV Generator"); 364MODULE_DESCRIPTION("Sequence Number IV Generator");
365MODULE_ALIAS_CRYPTO("seqiv");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 7ddbd7e88859..a53b5e2af335 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -665,5 +665,5 @@ module_exit(serpent_mod_fini);
665MODULE_LICENSE("GPL"); 665MODULE_LICENSE("GPL");
666MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm"); 666MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
667MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>"); 667MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
668MODULE_ALIAS("tnepres"); 668MODULE_ALIAS_CRYPTO("tnepres");
669MODULE_ALIAS("serpent"); 669MODULE_ALIAS_CRYPTO("serpent");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 7bb047432782..039e58cfa155 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -153,4 +153,4 @@ module_exit(sha1_generic_mod_fini);
153MODULE_LICENSE("GPL"); 153MODULE_LICENSE("GPL");
154MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); 154MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
155 155
156MODULE_ALIAS("sha1"); 156MODULE_ALIAS_CRYPTO("sha1");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 65e7b76b057f..5eb21b120033 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -384,5 +384,5 @@ module_exit(sha256_generic_mod_fini);
384MODULE_LICENSE("GPL"); 384MODULE_LICENSE("GPL");
385MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); 385MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
386 386
387MODULE_ALIAS("sha224"); 387MODULE_ALIAS_CRYPTO("sha224");
388MODULE_ALIAS("sha256"); 388MODULE_ALIAS_CRYPTO("sha256");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 95db67197cd9..8d0b19ed4f4b 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -288,5 +288,5 @@ module_exit(sha512_generic_mod_fini);
288MODULE_LICENSE("GPL"); 288MODULE_LICENSE("GPL");
289MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms"); 289MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
290 290
291MODULE_ALIAS("sha384"); 291MODULE_ALIAS_CRYPTO("sha384");
292MODULE_ALIAS("sha512"); 292MODULE_ALIAS_CRYPTO("sha512");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 890449e6e7ef..1d864e988ea9 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1225,15 +1225,22 @@ static inline int tcrypt_test(const char *alg)
1225 return ret; 1225 return ret;
1226} 1226}
1227 1227
1228static int do_test(int m) 1228static int do_test(const char *alg, u32 type, u32 mask, int m)
1229{ 1229{
1230 int i; 1230 int i;
1231 int ret = 0; 1231 int ret = 0;
1232 1232
1233 switch (m) { 1233 switch (m) {
1234 case 0: 1234 case 0:
1235 if (alg) {
1236 if (!crypto_has_alg(alg, type,
1237 mask ?: CRYPTO_ALG_TYPE_MASK))
1238 ret = -ENOENT;
1239 break;
1240 }
1241
1235 for (i = 1; i < 200; i++) 1242 for (i = 1; i < 200; i++)
1236 ret += do_test(i); 1243 ret += do_test(NULL, 0, 0, i);
1237 break; 1244 break;
1238 1245
1239 case 1: 1246 case 1:
@@ -1752,6 +1759,11 @@ static int do_test(int m)
1752 break; 1759 break;
1753 1760
1754 case 300: 1761 case 300:
1762 if (alg) {
1763 test_hash_speed(alg, sec, generic_hash_speed_template);
1764 break;
1765 }
1766
1755 /* fall through */ 1767 /* fall through */
1756 1768
1757 case 301: 1769 case 301:
@@ -1838,6 +1850,11 @@ static int do_test(int m)
1838 break; 1850 break;
1839 1851
1840 case 400: 1852 case 400:
1853 if (alg) {
1854 test_ahash_speed(alg, sec, generic_hash_speed_template);
1855 break;
1856 }
1857
1841 /* fall through */ 1858 /* fall through */
1842 1859
1843 case 401: 1860 case 401:
@@ -2127,12 +2144,6 @@ static int do_test(int m)
2127 return ret; 2144 return ret;
2128} 2145}
2129 2146
2130static int do_alg_test(const char *alg, u32 type, u32 mask)
2131{
2132 return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
2133 0 : -ENOENT;
2134}
2135
2136static int __init tcrypt_mod_init(void) 2147static int __init tcrypt_mod_init(void)
2137{ 2148{
2138 int err = -ENOMEM; 2149 int err = -ENOMEM;
@@ -2144,10 +2155,7 @@ static int __init tcrypt_mod_init(void)
2144 goto err_free_tv; 2155 goto err_free_tv;
2145 } 2156 }
2146 2157
2147 if (alg) 2158 err = do_test(alg, type, mask, mode);
2148 err = do_alg_test(alg, type, mask);
2149 else
2150 err = do_test(mode);
2151 2159
2152 if (err) { 2160 if (err) {
2153 printk(KERN_ERR "tcrypt: one or more tests failed!\n"); 2161 printk(KERN_ERR "tcrypt: one or more tests failed!\n");
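
Folding do_alg_test() into do_test() lets the alg= module parameter reach the speed-test cases (300/400) instead of only the presence check, so arbitrary hash drivers can be benchmarked. The old mode-0 behaviour survives as the alg branch at the top of the switch; it boils down to the removed helper, restated here for reference:

#include <linux/crypto.h>
#include <linux/errno.h>

/* Equivalent of the removed do_alg_test(): succeed iff the named
 * algorithm can be instantiated with the given type/mask. */
static int alg_available(const char *alg, u32 type, u32 mask)
{
	return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
	       0 : -ENOENT;
}

With this in place, something like "modprobe tcrypt mode=300 alg=sha256" runs test_hash_speed() on the named algorithm rather than only the hard-coded list.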
diff --git a/crypto/tea.c b/crypto/tea.c
index 0a572323ee4a..495be2d0077d 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -270,8 +270,8 @@ static void __exit tea_mod_fini(void)
270 crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs)); 270 crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
271} 271}
272 272
273MODULE_ALIAS("xtea"); 273MODULE_ALIAS_CRYPTO("xtea");
274MODULE_ALIAS("xeta"); 274MODULE_ALIAS_CRYPTO("xeta");
275 275
276module_init(tea_mod_init); 276module_init(tea_mod_init);
277module_exit(tea_mod_fini); 277module_exit(tea_mod_fini);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 29a0cbdd0d19..037368d34586 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3708,8 +3708,7 @@ test_done:
3708 panic("%s: %s alg self test failed in fips mode!\n", driver, alg); 3708 panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
3709 3709
3710 if (fips_enabled && !rc) 3710 if (fips_enabled && !rc)
3711 pr_info(KERN_INFO "alg: self-tests for %s (%s) passed\n", 3711 pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
3712 driver, alg);
3713 3712
3714 return rc; 3713 return rc;
3715 3714
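
The testmgr change is a log-level fix: pr_info() already supplies KERN_INFO through its printk() expansion, so passing KERN_INFO in the format string just embeds a second level marker in the message text. Either form below is fine on its own; mixing them is not:

#include <linux/printk.h>

static void log_pass(const char *driver, const char *alg)
{
	/* pr_info() adds the KERN_INFO level itself ... */
	pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);

	/* ... it is shorthand for: */
	printk(KERN_INFO "alg: self-tests for %s (%s) passed\n", driver, alg);
}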
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 3c7af0d1ff7a..6e5651c66cf8 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -676,8 +676,8 @@ static void __exit tgr192_mod_fini(void)
676 crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs)); 676 crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
677} 677}
678 678
679MODULE_ALIAS("tgr160"); 679MODULE_ALIAS_CRYPTO("tgr160");
680MODULE_ALIAS("tgr128"); 680MODULE_ALIAS_CRYPTO("tgr128");
681 681
682module_init(tgr192_mod_init); 682module_init(tgr192_mod_init);
683module_exit(tgr192_mod_fini); 683module_exit(tgr192_mod_fini);
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 2d5000552d0f..523ad8c4e359 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -211,4 +211,4 @@ module_exit(twofish_mod_fini);
211 211
212MODULE_LICENSE("GPL"); 212MODULE_LICENSE("GPL");
213MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); 213MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
214MODULE_ALIAS("twofish"); 214MODULE_ALIAS_CRYPTO("twofish");
diff --git a/crypto/vmac.c b/crypto/vmac.c
index d84c24bd7ff7..df76a816cfb2 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
713 713
714MODULE_LICENSE("GPL"); 714MODULE_LICENSE("GPL");
715MODULE_DESCRIPTION("VMAC hash algorithm"); 715MODULE_DESCRIPTION("VMAC hash algorithm");
716MODULE_ALIAS_CRYPTO("vmac");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index ec64e7762fbb..0de42eb3d040 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1167,8 +1167,8 @@ static void __exit wp512_mod_fini(void)
1167 crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs)); 1167 crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
1168} 1168}
1169 1169
1170MODULE_ALIAS("wp384"); 1170MODULE_ALIAS_CRYPTO("wp384");
1171MODULE_ALIAS("wp256"); 1171MODULE_ALIAS_CRYPTO("wp256");
1172 1172
1173module_init(wp512_mod_init); 1173module_init(wp512_mod_init);
1174module_exit(wp512_mod_fini); 1174module_exit(wp512_mod_fini);
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index a5fbdf3738cf..df90b332554c 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
286 286
287MODULE_LICENSE("GPL"); 287MODULE_LICENSE("GPL");
288MODULE_DESCRIPTION("XCBC keyed hash algorithm"); 288MODULE_DESCRIPTION("XCBC keyed hash algorithm");
289MODULE_ALIAS_CRYPTO("xcbc");
diff --git a/crypto/xts.c b/crypto/xts.c
index ca1608f44cb5..f6fd43f100c8 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
362 362
363MODULE_LICENSE("GPL"); 363MODULE_LICENSE("GPL");
364MODULE_DESCRIPTION("XTS block cipher mode"); 364MODULE_DESCRIPTION("XTS block cipher mode");
365MODULE_ALIAS_CRYPTO("xts");
diff --git a/crypto/zlib.c b/crypto/zlib.c
index c9ee681d57fd..0eefa9d237ac 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
378MODULE_LICENSE("GPL"); 378MODULE_LICENSE("GPL");
379MODULE_DESCRIPTION("Zlib Compression Algorithm"); 379MODULE_DESCRIPTION("Zlib Compression Algorithm");
380MODULE_AUTHOR("Sony Corporation"); 380MODULE_AUTHOR("Sony Corporation");
381MODULE_ALIAS_CRYPTO("zlib");
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 91a04ae8003c..de57b38809c7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -64,7 +64,7 @@ config HW_RANDOM_AMD
64 64
65config HW_RANDOM_ATMEL 65config HW_RANDOM_ATMEL
66 tristate "Atmel Random Number Generator support" 66 tristate "Atmel Random Number Generator support"
67 depends on ARCH_AT91 && HAVE_CLK 67 depends on ARCH_AT91 && HAVE_CLK && OF
68 default HW_RANDOM 68 default HW_RANDOM
69 ---help--- 69 ---help---
70 This driver provides kernel-side support for the Random Number 70 This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 851bc7e20ad2..0bb0b2120a63 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -67,7 +67,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
67 if (IS_ERR(trng->clk)) 67 if (IS_ERR(trng->clk))
68 return PTR_ERR(trng->clk); 68 return PTR_ERR(trng->clk);
69 69
70 ret = clk_enable(trng->clk); 70 ret = clk_prepare_enable(trng->clk);
71 if (ret) 71 if (ret)
72 return ret; 72 return ret;
73 73
@@ -95,7 +95,7 @@ static int atmel_trng_remove(struct platform_device *pdev)
95 hwrng_unregister(&trng->rng); 95 hwrng_unregister(&trng->rng);
96 96
97 writel(TRNG_KEY, trng->base + TRNG_CR); 97 writel(TRNG_KEY, trng->base + TRNG_CR);
98 clk_disable(trng->clk); 98 clk_disable_unprepare(trng->clk);
99 99
100 return 0; 100 return 0;
101} 101}
@@ -105,7 +105,7 @@ static int atmel_trng_suspend(struct device *dev)
105{ 105{
106 struct atmel_trng *trng = dev_get_drvdata(dev); 106 struct atmel_trng *trng = dev_get_drvdata(dev);
107 107
108 clk_disable(trng->clk); 108 clk_disable_unprepare(trng->clk);
109 109
110 return 0; 110 return 0;
111} 111}
@@ -114,7 +114,7 @@ static int atmel_trng_resume(struct device *dev)
114{ 114{
115 struct atmel_trng *trng = dev_get_drvdata(dev); 115 struct atmel_trng *trng = dev_get_drvdata(dev);
116 116
117 return clk_enable(trng->clk); 117 return clk_prepare_enable(trng->clk);
118} 118}
119 119
120static const struct dev_pm_ops atmel_trng_pm_ops = { 120static const struct dev_pm_ops atmel_trng_pm_ops = {
@@ -123,6 +123,12 @@ static const struct dev_pm_ops atmel_trng_pm_ops = {
123}; 123};
124#endif /* CONFIG_PM */ 124#endif /* CONFIG_PM */
125 125
126static const struct of_device_id atmel_trng_dt_ids[] = {
127 { .compatible = "atmel,at91sam9g45-trng" },
128 { /* sentinel */ }
129};
130MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
131
126static struct platform_driver atmel_trng_driver = { 132static struct platform_driver atmel_trng_driver = {
127 .probe = atmel_trng_probe, 133 .probe = atmel_trng_probe,
128 .remove = atmel_trng_remove, 134 .remove = atmel_trng_remove,
@@ -132,6 +138,7 @@ static struct platform_driver atmel_trng_driver = {
132#ifdef CONFIG_PM 138#ifdef CONFIG_PM
133 .pm = &atmel_trng_pm_ops, 139 .pm = &atmel_trng_pm_ops,
134#endif /* CONFIG_PM */ 140#endif /* CONFIG_PM */
141 .of_match_table = atmel_trng_dt_ids,
135 }, 142 },
136}; 143};
137 144
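
Besides switching to clk_prepare_enable()/clk_disable_unprepare(), which is required once the clock is managed by the common clock framework, the driver gains a device-tree match table; that is what the new OF dependency in Kconfig and the at91sam9g45.dtsi node hook into. The general shape of the pattern, with placeholder names, looks like this:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* placeholder probe/remove; a real driver does the clk/hwrng work here */
static int foo_trng_probe(struct platform_device *pdev)  { return 0; }
static int foo_trng_remove(struct platform_device *pdev) { return 0; }

static const struct of_device_id foo_trng_dt_ids[] = {
	{ .compatible = "vendor,foo-trng" },	/* hypothetical compatible string */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_trng_dt_ids);

static struct platform_driver foo_trng_driver = {
	.probe	= foo_trng_probe,
	.remove	= foo_trng_remove,
	.driver	= {
		.name		= "foo-trng",
		.of_match_table	= foo_trng_dt_ids,
	},
};
module_platform_driver(foo_trng_driver);
MODULE_LICENSE("GPL");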
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aa30a25c8d49..1500cfd799a7 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -281,7 +281,6 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
281 char *buf) 281 char *buf)
282{ 282{
283 int err; 283 int err;
284 ssize_t ret = 0;
285 struct hwrng *rng; 284 struct hwrng *rng;
286 285
287 err = mutex_lock_interruptible(&rng_mutex); 286 err = mutex_lock_interruptible(&rng_mutex);
@@ -289,16 +288,13 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
289 return -ERESTARTSYS; 288 return -ERESTARTSYS;
290 buf[0] = '\0'; 289 buf[0] = '\0';
291 list_for_each_entry(rng, &rng_list, list) { 290 list_for_each_entry(rng, &rng_list, list) {
292 strncat(buf, rng->name, PAGE_SIZE - ret - 1); 291 strlcat(buf, rng->name, PAGE_SIZE);
293 ret += strlen(rng->name); 292 strlcat(buf, " ", PAGE_SIZE);
294 strncat(buf, " ", PAGE_SIZE - ret - 1);
295 ret++;
296 } 293 }
297 strncat(buf, "\n", PAGE_SIZE - ret - 1); 294 strlcat(buf, "\n", PAGE_SIZE);
298 ret++;
299 mutex_unlock(&rng_mutex); 295 mutex_unlock(&rng_mutex);
300 296
301 return ret; 297 return strlen(buf);
302} 298}
303 299
304static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR, 300static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
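
The strncat() to strlcat() conversion removes the hand-maintained length counter: strncat()'s size argument is "at most this many extra bytes", so every call had to be fed the remaining room, while strlcat() takes the total destination size and truncates safely. A small illustrative fragment of the difference:

#include <linux/string.h>
#include <linux/printk.h>

static void strcat_demo(void)
{
	char buf[16] = "rng0";

	/* strncat(): the caller must compute how much space is left. */
	strncat(buf, " rng1", sizeof(buf) - strlen(buf) - 1);

	/* strlcat(): pass the full size of buf; the return value is the
	 * length it tried to build, so truncation is easy to detect and
	 * no running counter is needed. */
	if (strlcat(buf, " rng2", sizeof(buf)) >= sizeof(buf))
		pr_warn("truncated\n");
}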
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index b099e33cb073..e96eddc0e0b3 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -21,13 +21,13 @@
21#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/unaligned/access_ok.h>
25#include <linux/crypto.h> 24#include <linux/crypto.h>
26#include <linux/cryptohash.h> 25#include <linux/cryptohash.h>
27#include <crypto/scatterwalk.h> 26#include <crypto/scatterwalk.h>
28#include <crypto/algapi.h> 27#include <crypto/algapi.h>
29#include <crypto/hash.h> 28#include <crypto/hash.h>
30#include <crypto/internal/hash.h> 29#include <crypto/internal/hash.h>
30#include <asm/unaligned.h>
31 31
32#include <asm/dma.h> 32#include <asm/dma.h>
33#include <asm/portmux.h> 33#include <asm/portmux.h>
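
<linux/unaligned/access_ok.h> is an internal backend header; drivers are expected to include <asm/unaligned.h>, which selects the right implementation for the architecture and provides the usual accessors. An illustrative fragment:

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_len_field(const u8 *p)
{
	/* safe even when p is not 4-byte aligned */
	return get_unaligned_le32(p);
}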
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index a80ea853701d..3187400daf31 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -60,6 +60,7 @@
60#define CAAM_CRA_PRIORITY 3000 60#define CAAM_CRA_PRIORITY 3000
61/* max key is sum of AES_MAX_KEY_SIZE, max split key size */ 61/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ 62#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
63 CTR_RFC3686_NONCE_SIZE + \
63 SHA512_DIGEST_SIZE * 2) 64 SHA512_DIGEST_SIZE * 2)
64/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 65/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
65#define CAAM_MAX_IV_LENGTH 16 66#define CAAM_MAX_IV_LENGTH 16
@@ -70,17 +71,34 @@
70#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ) 71#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
71#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) 72#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
72 73
74/* Note: Nonce is counted in enckeylen */
75#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ)
76
73#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) 77#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
74#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ) 78#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
75#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) 79#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
76 80
81#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
82#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
83#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ)
84
85#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
86#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
87#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
88#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)
89
90#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
91#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
92#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
93#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)
94
77#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) 95#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
78#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ 96#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
79 20 * CAAM_CMD_SZ) 97 20 * CAAM_CMD_SZ)
80#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ 98#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
81 15 * CAAM_CMD_SZ) 99 15 * CAAM_CMD_SZ)
82 100
83#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \ 101#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \
84 CAAM_MAX_KEY_SIZE) 102 CAAM_MAX_KEY_SIZE)
85#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 103#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
86 104
@@ -128,11 +146,13 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
128/* 146/*
129 * For aead encrypt and decrypt, read iv for both classes 147 * For aead encrypt and decrypt, read iv for both classes
130 */ 148 */
131static inline void aead_append_ld_iv(u32 *desc, int ivsize) 149static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
132{ 150{
133 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 151 append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
134 LDST_CLASS_1_CCB | ivsize); 152 LDST_SRCDST_BYTE_CONTEXT |
135 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); 153 (ivoffset << LDST_OFFSET_SHIFT));
154 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
155 (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
136} 156}
137 157
138/* 158/*
@@ -178,35 +198,60 @@ struct caam_ctx {
178}; 198};
179 199
180static void append_key_aead(u32 *desc, struct caam_ctx *ctx, 200static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
181 int keys_fit_inline) 201 int keys_fit_inline, bool is_rfc3686)
182{ 202{
203 u32 *nonce;
204 unsigned int enckeylen = ctx->enckeylen;
205
206 /*
207 * RFC3686 specific:
208 * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
209 * | enckeylen = encryption key size + nonce size
210 */
211 if (is_rfc3686)
212 enckeylen -= CTR_RFC3686_NONCE_SIZE;
213
183 if (keys_fit_inline) { 214 if (keys_fit_inline) {
184 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, 215 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
185 ctx->split_key_len, CLASS_2 | 216 ctx->split_key_len, CLASS_2 |
186 KEY_DEST_MDHA_SPLIT | KEY_ENC); 217 KEY_DEST_MDHA_SPLIT | KEY_ENC);
187 append_key_as_imm(desc, (void *)ctx->key + 218 append_key_as_imm(desc, (void *)ctx->key +
188 ctx->split_key_pad_len, ctx->enckeylen, 219 ctx->split_key_pad_len, enckeylen,
189 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); 220 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
190 } else { 221 } else {
191 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | 222 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
192 KEY_DEST_MDHA_SPLIT | KEY_ENC); 223 KEY_DEST_MDHA_SPLIT | KEY_ENC);
193 append_key(desc, ctx->key_dma + ctx->split_key_pad_len, 224 append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
194 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); 225 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
226 }
227
228 /* Load Counter into CONTEXT1 reg */
229 if (is_rfc3686) {
230 nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
231 enckeylen);
232 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
233 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
234 append_move(desc,
235 MOVE_SRC_OUTFIFO |
236 MOVE_DEST_CLASS1CTX |
237 (16 << MOVE_OFFSET_SHIFT) |
238 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
195 } 239 }
196} 240}
197 241
198static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, 242static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
199 int keys_fit_inline) 243 int keys_fit_inline, bool is_rfc3686)
200{ 244{
201 u32 *key_jump_cmd; 245 u32 *key_jump_cmd;
202 246
203 init_sh_desc(desc, HDR_SHARE_SERIAL); 247 /* Note: Context registers are saved. */
248 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
204 249
205 /* Skip if already shared */ 250 /* Skip if already shared */
206 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 251 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
207 JUMP_COND_SHRD); 252 JUMP_COND_SHRD);
208 253
209 append_key_aead(desc, ctx, keys_fit_inline); 254 append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
210 255
211 set_jump_tgt_here(desc, key_jump_cmd); 256 set_jump_tgt_here(desc, key_jump_cmd);
212} 257}
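
The new rfc3686 handling in append_key_aead() depends on a fixed layout of the key blob and of the CONTEXT1 register. Summarised from the comments in this hunk and the ones that follow (annotation only, not code from the patch):

/*
 * rfc3686(ctr(aes)) with an authentication key, as laid out by caamalg:
 *
 *   ctx->key:           [ split AUTH_KEY (padded) | ENC_KEY | NONCE(4) ]
 *                       ctx->enckeylen counts ENC_KEY + NONCE, hence the
 *                       CTR_RFC3686_NONCE_SIZE subtraction above.
 *
 *   CONTEXT1[255:128] = { NONCE(4) | IV(8) | COUNTER(4) }
 *                       so the IV is loaded at offset 16 for plain CTR and
 *                       at 16 + CTR_RFC3686_NONCE_SIZE for rfc3686, which
 *                       is what ctx1_iv_off tracks below.
 */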
@@ -406,10 +451,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
406{ 451{
407 struct aead_tfm *tfm = &aead->base.crt_aead; 452 struct aead_tfm *tfm = &aead->base.crt_aead;
408 struct caam_ctx *ctx = crypto_aead_ctx(aead); 453 struct caam_ctx *ctx = crypto_aead_ctx(aead);
454 struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
455 const char *alg_name = crypto_tfm_alg_name(ctfm);
409 struct device *jrdev = ctx->jrdev; 456 struct device *jrdev = ctx->jrdev;
410 bool keys_fit_inline = false; 457 bool keys_fit_inline;
411 u32 geniv, moveiv; 458 u32 geniv, moveiv;
459 u32 ctx1_iv_off = 0;
412 u32 *desc; 460 u32 *desc;
461 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
462 OP_ALG_AAI_CTR_MOD128);
463 const bool is_rfc3686 = (ctr_mode &&
464 (strstr(alg_name, "rfc3686") != NULL));
413 465
414 if (!ctx->authsize) 466 if (!ctx->authsize)
415 return 0; 467 return 0;
@@ -419,18 +471,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
419 return aead_null_set_sh_desc(aead); 471 return aead_null_set_sh_desc(aead);
420 472
421 /* 473 /*
474 * AES-CTR needs to load IV in CONTEXT1 reg
475 * at an offset of 128bits (16bytes)
476 * CONTEXT1[255:128] = IV
477 */
478 if (ctr_mode)
479 ctx1_iv_off = 16;
480
481 /*
482 * RFC3686 specific:
483 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
484 */
485 if (is_rfc3686)
486 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
487
488 /*
422 * Job Descriptor and Shared Descriptors 489 * Job Descriptor and Shared Descriptors
423 * must all fit into the 64-word Descriptor h/w Buffer 490 * must all fit into the 64-word Descriptor h/w Buffer
424 */ 491 */
492 keys_fit_inline = false;
425 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + 493 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
426 ctx->split_key_pad_len + ctx->enckeylen <= 494 ctx->split_key_pad_len + ctx->enckeylen +
495 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
427 CAAM_DESC_BYTES_MAX) 496 CAAM_DESC_BYTES_MAX)
428 keys_fit_inline = true; 497 keys_fit_inline = true;
429 498
430 /* aead_encrypt shared descriptor */ 499 /* aead_encrypt shared descriptor */
431 desc = ctx->sh_desc_enc; 500 desc = ctx->sh_desc_enc;
432 501
433 init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 502 /* Note: Context registers are saved. */
503 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
434 504
435 /* Class 2 operation */ 505 /* Class 2 operation */
436 append_operation(desc, ctx->class2_alg_type | 506 append_operation(desc, ctx->class2_alg_type |
@@ -448,7 +518,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
448 /* read assoc before reading payload */ 518 /* read assoc before reading payload */
449 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 519 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
450 KEY_VLF); 520 KEY_VLF);
451 aead_append_ld_iv(desc, tfm->ivsize); 521 aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
522
523 /* Load Counter into CONTEXT1 reg */
524 if (is_rfc3686)
525 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
526 LDST_CLASS_1_CCB |
527 LDST_SRCDST_BYTE_CONTEXT |
528 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
529 LDST_OFFSET_SHIFT));
452 530
453 /* Class 1 operation */ 531 /* Class 1 operation */
454 append_operation(desc, ctx->class1_alg_type | 532 append_operation(desc, ctx->class1_alg_type |
@@ -482,14 +560,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
482 */ 560 */
483 keys_fit_inline = false; 561 keys_fit_inline = false;
484 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + 562 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
485 ctx->split_key_pad_len + ctx->enckeylen <= 563 ctx->split_key_pad_len + ctx->enckeylen +
564 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
486 CAAM_DESC_BYTES_MAX) 565 CAAM_DESC_BYTES_MAX)
487 keys_fit_inline = true; 566 keys_fit_inline = true;
488 567
489 /* aead_decrypt shared descriptor */ 568 /* aead_decrypt shared descriptor */
490 desc = ctx->sh_desc_dec; 569 desc = ctx->sh_desc_dec;
491 570
492 init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 571 /* Note: Context registers are saved. */
572 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
493 573
494 /* Class 2 operation */ 574 /* Class 2 operation */
495 append_operation(desc, ctx->class2_alg_type | 575 append_operation(desc, ctx->class2_alg_type |
@@ -506,9 +586,22 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
506 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 586 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
507 KEY_VLF); 587 KEY_VLF);
508 588
509 aead_append_ld_iv(desc, tfm->ivsize); 589 aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
510 590
511 append_dec_op1(desc, ctx->class1_alg_type); 591 /* Load Counter into CONTEXT1 reg */
592 if (is_rfc3686)
593 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
594 LDST_CLASS_1_CCB |
595 LDST_SRCDST_BYTE_CONTEXT |
596 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
597 LDST_OFFSET_SHIFT));
598
599 /* Choose operation */
600 if (ctr_mode)
601 append_operation(desc, ctx->class1_alg_type |
602 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
603 else
604 append_dec_op1(desc, ctx->class1_alg_type);
512 605
513 /* Read and write cryptlen bytes */ 606 /* Read and write cryptlen bytes */
514 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); 607 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
@@ -538,14 +631,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
538 */ 631 */
539 keys_fit_inline = false; 632 keys_fit_inline = false;
540 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + 633 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
541 ctx->split_key_pad_len + ctx->enckeylen <= 634 ctx->split_key_pad_len + ctx->enckeylen +
635 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
542 CAAM_DESC_BYTES_MAX) 636 CAAM_DESC_BYTES_MAX)
543 keys_fit_inline = true; 637 keys_fit_inline = true;
544 638
545 /* aead_givencrypt shared descriptor */ 639 /* aead_givencrypt shared descriptor */
546 desc = ctx->sh_desc_givenc; 640 desc = ctx->sh_desc_givenc;
547 641
548 init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 642 /* Note: Context registers are saved. */
643 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
549 644
550 /* Generate IV */ 645 /* Generate IV */
551 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | 646 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
@@ -554,13 +649,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
554 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | 649 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
555 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); 650 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
556 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); 651 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
557 append_move(desc, MOVE_SRC_INFIFO | 652 append_move(desc, MOVE_WAITCOMP |
558 MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT)); 653 MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
654 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
655 (tfm->ivsize << MOVE_LEN_SHIFT));
559 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); 656 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
560 657
561 /* Copy IV to class 1 context */ 658 /* Copy IV to class 1 context */
562 append_move(desc, MOVE_SRC_CLASS1CTX | 659 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
563 MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT)); 660 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
661 (tfm->ivsize << MOVE_LEN_SHIFT));
564 662
565 /* Return to encryption */ 663 /* Return to encryption */
566 append_operation(desc, ctx->class2_alg_type | 664 append_operation(desc, ctx->class2_alg_type |
@@ -576,7 +674,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
576 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 674 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
577 KEY_VLF); 675 KEY_VLF);
578 676
579 /* Copy iv from class 1 ctx to class 2 fifo*/ 677 /* Copy iv from outfifo to class 2 fifo */
580 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | 678 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
581 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); 679 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
582 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | 680 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
@@ -584,6 +682,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
584 append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | 682 append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
585 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); 683 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
586 684
685 /* Load Counter into CONTEXT1 reg */
686 if (is_rfc3686)
687 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
688 LDST_CLASS_1_CCB |
689 LDST_SRCDST_BYTE_CONTEXT |
690 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
691 LDST_OFFSET_SHIFT));
692
587 /* Class 1 operation */ 693 /* Class 1 operation */
588 append_operation(desc, ctx->class1_alg_type | 694 append_operation(desc, ctx->class1_alg_type |
589 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 695 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
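The ctx1_iv_off arithmetic above is easier to see with concrete numbers. A standalone userspace sketch, not part of the patch; the two CTR_RFC3686_* constants mirror crypto/ctr.h:

#include <stdio.h>

#define CTR_RFC3686_NONCE_SIZE	4	/* mirrors crypto/ctr.h */
#define CTR_RFC3686_IV_SIZE	8	/* mirrors crypto/ctr.h */

int main(void)
{
	/* plain ctr(aes): IV lives in CONTEXT1[255:128], i.e. byte offset 16 */
	unsigned int ctr_iv_off = 16;

	/* rfc3686(ctr(aes)): CONTEXT1[255:128] = {NONCE, IV, COUNTER} */
	unsigned int rfc3686_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	unsigned int rfc3686_ctr_off = rfc3686_iv_off + CTR_RFC3686_IV_SIZE;

	printf("ctr(aes) IV offset      = %u\n", ctr_iv_off);		/* 16 */
	printf("rfc3686  IV offset      = %u\n", rfc3686_iv_off);	/* 20 */
	printf("rfc3686  counter offset = %u\n", rfc3686_ctr_off);	/* 28 */
	return 0;
}

The last value (28) is the offset at which the descriptors above load the initial block counter of 1.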
@@ -630,6 +736,912 @@ static int aead_setauthsize(struct crypto_aead *authenc,
630 return 0; 736 return 0;
631} 737}
632 738
739static int gcm_set_sh_desc(struct crypto_aead *aead)
740{
741 struct aead_tfm *tfm = &aead->base.crt_aead;
742 struct caam_ctx *ctx = crypto_aead_ctx(aead);
743 struct device *jrdev = ctx->jrdev;
744 bool keys_fit_inline = false;
745 u32 *key_jump_cmd, *zero_payload_jump_cmd,
746 *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
747 u32 *desc;
748
749 if (!ctx->enckeylen || !ctx->authsize)
750 return 0;
751
752 /*
753 * AES GCM encrypt shared descriptor
754 * Job Descriptor and Shared Descriptor
755 * must fit into the 64-word Descriptor h/w Buffer
756 */
757 if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
758 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
759 keys_fit_inline = true;
760
761 desc = ctx->sh_desc_enc;
762
763 init_sh_desc(desc, HDR_SHARE_SERIAL);
764
 765	/* skip key loading if it is loaded due to sharing */
766 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
767 JUMP_COND_SHRD | JUMP_COND_SELF);
768 if (keys_fit_inline)
769 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
770 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
771 else
772 append_key(desc, ctx->key_dma, ctx->enckeylen,
773 CLASS_1 | KEY_DEST_CLASS_REG);
774 set_jump_tgt_here(desc, key_jump_cmd);
775
776 /* class 1 operation */
777 append_operation(desc, ctx->class1_alg_type |
778 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
779
780 /* cryptlen = seqoutlen - authsize */
781 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
782
783 /* assoclen + cryptlen = seqinlen - ivsize */
784 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
785
786 /* assoclen = (assoclen + cryptlen) - cryptlen */
787 append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);
788
 789	/* if cryptlen is ZERO, jump to zero-payload commands */
790 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
791 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
792 JUMP_COND_MATH_Z);
793 /* read IV */
794 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
795 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
796
797 /* if assoclen is ZERO, skip reading the assoc data */
798 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
799 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
800 JUMP_COND_MATH_Z);
801
802 /* read assoc data */
803 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
804 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
805 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
806
807 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
808
809 /* write encrypted data */
810 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
811
812 /* read payload data */
813 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
814 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
815
 816	/* skip the zero-payload commands */
817 append_jump(desc, JUMP_TEST_ALL | 7);
818
819 /* zero-payload commands */
820 set_jump_tgt_here(desc, zero_payload_jump_cmd);
821
 822	/* if assoclen is ZERO, jump to IV reading - IV is the only input data */
823 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
824 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
825 JUMP_COND_MATH_Z);
826 /* read IV */
827 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
828 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
829
830 /* read assoc data */
831 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
832 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
833
834 /* jump to ICV writing */
835 append_jump(desc, JUMP_TEST_ALL | 2);
836
 837	/* read IV - it is the only input data */
838 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
839 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
840 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
841 FIFOLD_TYPE_LAST1);
842
843 /* write ICV */
844 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
845 LDST_SRCDST_BYTE_CONTEXT);
846
847 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
848 desc_bytes(desc),
849 DMA_TO_DEVICE);
850 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
851 dev_err(jrdev, "unable to map shared descriptor\n");
852 return -ENOMEM;
853 }
854#ifdef DEBUG
855 print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
856 DUMP_PREFIX_ADDRESS, 16, 4, desc,
857 desc_bytes(desc), 1);
858#endif
859
860 /*
861 * Job Descriptor and Shared Descriptors
862 * must all fit into the 64-word Descriptor h/w Buffer
863 */
864 keys_fit_inline = false;
865 if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
866 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
867 keys_fit_inline = true;
868
869 desc = ctx->sh_desc_dec;
870
871 init_sh_desc(desc, HDR_SHARE_SERIAL);
872
 873	/* skip key loading if it is loaded due to sharing */
874 key_jump_cmd = append_jump(desc, JUMP_JSL |
875 JUMP_TEST_ALL | JUMP_COND_SHRD |
876 JUMP_COND_SELF);
877 if (keys_fit_inline)
878 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
879 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
880 else
881 append_key(desc, ctx->key_dma, ctx->enckeylen,
882 CLASS_1 | KEY_DEST_CLASS_REG);
883 set_jump_tgt_here(desc, key_jump_cmd);
884
885 /* class 1 operation */
886 append_operation(desc, ctx->class1_alg_type |
887 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
888
889 /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
890 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
891 ctx->authsize + tfm->ivsize);
892
893 /* assoclen = (assoclen + cryptlen) - cryptlen */
894 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
895 append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
896
897 /* read IV */
898 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
899 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
900
901 /* jump to zero-payload command if cryptlen is zero */
902 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
903 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
904 JUMP_COND_MATH_Z);
905
906 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
 907	/* if assoclen is ZERO, skip reading assoc data */
908 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
909 JUMP_COND_MATH_Z);
910 /* read assoc data */
911 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
912 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
913 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
914
915 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
916
917 /* store encrypted data */
918 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
919
920 /* read payload data */
921 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
922 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
923
 924	/* skip the zero-payload commands */
925 append_jump(desc, JUMP_TEST_ALL | 4);
926
927 /* zero-payload command */
928 set_jump_tgt_here(desc, zero_payload_jump_cmd);
929
930 /* if assoclen is ZERO, jump to ICV reading */
931 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
932 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
933 JUMP_COND_MATH_Z);
934 /* read assoc data */
935 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
936 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
937 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
938
939 /* read ICV */
940 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
941 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
942
943 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
944 desc_bytes(desc),
945 DMA_TO_DEVICE);
946 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
947 dev_err(jrdev, "unable to map shared descriptor\n");
948 return -ENOMEM;
949 }
950#ifdef DEBUG
951 print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
952 DUMP_PREFIX_ADDRESS, 16, 4, desc,
953 desc_bytes(desc), 1);
954#endif
955
956 return 0;
957}
958
959static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
960{
961 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
962
963 ctx->authsize = authsize;
964 gcm_set_sh_desc(authenc);
965
966 return 0;
967}
968
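For reference, the MATH commands in gcm_set_sh_desc() above derive all three working lengths from the sequence lengths alone. A standalone sketch with made-up request sizes; the 12-byte IV here is only an illustrative assumption, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int ivsize = 12, assoclen = 20, cryptlen = 64, authsize = 16;

	/* what the DECO sees for a gcm(aes) encrypt request */
	unsigned int seqinlen  = ivsize + assoclen + cryptlen;	/* IV, AAD, text */
	unsigned int seqoutlen = cryptlen + authsize;		/* text, ICV */

	unsigned int reg3 = seqoutlen - authsize;	/* cryptlen */
	unsigned int reg2 = seqinlen - ivsize;		/* assoclen + cryptlen */
	unsigned int reg1 = reg2 - reg3;		/* assoclen */

	printf("REG3 (cryptlen)            = %u\n", reg3);
	printf("REG2 (assoclen + cryptlen) = %u\n", reg2);
	printf("REG1 (assoclen)            = %u\n", reg1);
	return 0;
}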
969static int rfc4106_set_sh_desc(struct crypto_aead *aead)
970{
971 struct aead_tfm *tfm = &aead->base.crt_aead;
972 struct caam_ctx *ctx = crypto_aead_ctx(aead);
973 struct device *jrdev = ctx->jrdev;
974 bool keys_fit_inline = false;
975 u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
976 u32 *desc;
977 u32 geniv;
978
979 if (!ctx->enckeylen || !ctx->authsize)
980 return 0;
981
982 /*
983 * RFC4106 encrypt shared descriptor
984 * Job Descriptor and Shared Descriptor
985 * must fit into the 64-word Descriptor h/w Buffer
986 */
987 if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
988 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
989 keys_fit_inline = true;
990
991 desc = ctx->sh_desc_enc;
992
993 init_sh_desc(desc, HDR_SHARE_SERIAL);
994
995 /* Skip key loading if it is loaded due to sharing */
996 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
997 JUMP_COND_SHRD);
998 if (keys_fit_inline)
999 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1000 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1001 else
1002 append_key(desc, ctx->key_dma, ctx->enckeylen,
1003 CLASS_1 | KEY_DEST_CLASS_REG);
1004 set_jump_tgt_here(desc, key_jump_cmd);
1005
1006 /* Class 1 operation */
1007 append_operation(desc, ctx->class1_alg_type |
1008 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1009
1010 /* cryptlen = seqoutlen - authsize */
1011 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1012 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1013
1014 /* assoclen + cryptlen = seqinlen - ivsize */
1015 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
1016
1017 /* assoclen = (assoclen + cryptlen) - cryptlen */
1018 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
1019
1020 /* Read Salt */
1021 append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
1022 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
1023 /* Read AES-GCM-ESP IV */
1024 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
1025 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
1026
1027 /* Read assoc data */
1028 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1029 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1030
1031 /* Will read cryptlen bytes */
1032 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
1033
1034 /* Write encrypted data */
1035 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1036
1037 /* Read payload data */
1038 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1039 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
1040
1041 /* Write ICV */
1042 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1043 LDST_SRCDST_BYTE_CONTEXT);
1044
1045 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1046 desc_bytes(desc),
1047 DMA_TO_DEVICE);
1048 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1049 dev_err(jrdev, "unable to map shared descriptor\n");
1050 return -ENOMEM;
1051 }
1052#ifdef DEBUG
1053 print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
1054 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1055 desc_bytes(desc), 1);
1056#endif
1057
1058 /*
1059 * Job Descriptor and Shared Descriptors
1060 * must all fit into the 64-word Descriptor h/w Buffer
1061 */
1062 keys_fit_inline = false;
1063 if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
1064 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1065 keys_fit_inline = true;
1066
1067 desc = ctx->sh_desc_dec;
1068
1069 init_sh_desc(desc, HDR_SHARE_SERIAL);
1070
1071 /* Skip key loading if it is loaded due to sharing */
1072 key_jump_cmd = append_jump(desc, JUMP_JSL |
1073 JUMP_TEST_ALL | JUMP_COND_SHRD);
1074 if (keys_fit_inline)
1075 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1076 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1077 else
1078 append_key(desc, ctx->key_dma, ctx->enckeylen,
1079 CLASS_1 | KEY_DEST_CLASS_REG);
1080 set_jump_tgt_here(desc, key_jump_cmd);
1081
1082 /* Class 1 operation */
1083 append_operation(desc, ctx->class1_alg_type |
1084 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1085
1086 /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
1087 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
1088 ctx->authsize + tfm->ivsize);
1089
1090 /* assoclen = (assoclen + cryptlen) - cryptlen */
1091 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1092 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
1093
1094 /* Will write cryptlen bytes */
1095 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1096
1097 /* Read Salt */
1098 append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
1099 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
1100 /* Read AES-GCM-ESP IV */
1101 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
1102 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
1103
1104 /* Read assoc data */
1105 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1106 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1107
1108 /* Will read cryptlen bytes */
1109 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
1110
1111 /* Store payload data */
1112 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1113
1114 /* Read encrypted data */
1115 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1116 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
1117
1118 /* Read ICV */
1119 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1120 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1121
1122 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1123 desc_bytes(desc),
1124 DMA_TO_DEVICE);
1125 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1126 dev_err(jrdev, "unable to map shared descriptor\n");
1127 return -ENOMEM;
1128 }
1129#ifdef DEBUG
1130 print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
1131 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1132 desc_bytes(desc), 1);
1133#endif
1134
1135 /*
1136 * Job Descriptor and Shared Descriptors
1137 * must all fit into the 64-word Descriptor h/w Buffer
1138 */
1139 keys_fit_inline = false;
1140 if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
1141 ctx->split_key_pad_len + ctx->enckeylen <=
1142 CAAM_DESC_BYTES_MAX)
1143 keys_fit_inline = true;
1144
1145 /* rfc4106_givencrypt shared descriptor */
1146 desc = ctx->sh_desc_givenc;
1147
1148 init_sh_desc(desc, HDR_SHARE_SERIAL);
1149
1150 /* Skip key loading if it is loaded due to sharing */
1151 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1152 JUMP_COND_SHRD);
1153 if (keys_fit_inline)
1154 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1155 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1156 else
1157 append_key(desc, ctx->key_dma, ctx->enckeylen,
1158 CLASS_1 | KEY_DEST_CLASS_REG);
1159 set_jump_tgt_here(desc, key_jump_cmd);
1160
1161 /* Generate IV */
1162 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1163 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1164 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
1165 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1166 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1167 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1168 move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
1169 (tfm->ivsize << MOVE_LEN_SHIFT));
1170 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1171
1172 /* Copy generated IV to OFIFO */
1173 write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
1174 (tfm->ivsize << MOVE_LEN_SHIFT));
1175
1176 /* Class 1 operation */
1177 append_operation(desc, ctx->class1_alg_type |
1178 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1179
1180 /* ivsize + cryptlen = seqoutlen - authsize */
1181 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1182
1183 /* assoclen = seqinlen - (ivsize + cryptlen) */
1184 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1185
1186 /* Will write ivsize + cryptlen */
1187 append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
1188
1189 /* Read Salt and generated IV */
1190 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
1191 FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
1192 /* Append Salt */
1193 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1194 set_move_tgt_here(desc, move_cmd);
1195 set_move_tgt_here(desc, write_iv_cmd);
1196 /* Blank commands. Will be overwritten by generated IV. */
1197 append_cmd(desc, 0x00000000);
1198 append_cmd(desc, 0x00000000);
1199 /* End of blank commands */
1200
1201 /* No need to reload iv */
1202 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
1203
1204 /* Read assoc data */
1205 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1206 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1207
1208 /* Will read cryptlen */
1209 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1210
1211 /* Store generated IV and encrypted data */
1212 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1213
1214 /* Read payload data */
1215 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1216 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
1217
1218 /* Write ICV */
1219 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1220 LDST_SRCDST_BYTE_CONTEXT);
1221
1222 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1223 desc_bytes(desc),
1224 DMA_TO_DEVICE);
1225 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1226 dev_err(jrdev, "unable to map shared descriptor\n");
1227 return -ENOMEM;
1228 }
1229#ifdef DEBUG
1230 print_hex_dump(KERN_ERR,
1231 "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
1232 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1233 desc_bytes(desc), 1);
1234#endif
1235
1236 return 0;
1237}
1238
1239static int rfc4106_setauthsize(struct crypto_aead *authenc,
1240 unsigned int authsize)
1241{
1242 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1243
1244 ctx->authsize = authsize;
1245 rfc4106_set_sh_desc(authenc);
1246
1247 return 0;
1248}
1249
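The salt/IV handling in rfc4106_set_sh_desc() above follows RFC 4106: the 4-byte salt kept at the end of the key material plus the 8-byte per-request IV form the 96-bit GCM nonce. A hedged standalone helper, not from the patch, showing the resulting initial counter block (J0 per NIST SP 800-38D for 96-bit IVs):

#include <stdint.h>
#include <string.h>

static void rfc4106_counter_block(uint8_t j0[16],
				  const uint8_t salt[4],	/* last 4 key bytes */
				  const uint8_t iv[8])		/* explicit per-packet IV */
{
	memcpy(j0, salt, 4);
	memcpy(j0 + 4, iv, 8);
	/* 32-bit big-endian block counter, initialised to 1 */
	j0[12] = 0;
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;
}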
1250static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1251{
1252 struct aead_tfm *tfm = &aead->base.crt_aead;
1253 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1254 struct device *jrdev = ctx->jrdev;
1255 bool keys_fit_inline = false;
1256 u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
1257 u32 *read_move_cmd, *write_move_cmd;
1258 u32 *desc;
1259 u32 geniv;
1260
1261 if (!ctx->enckeylen || !ctx->authsize)
1262 return 0;
1263
1264 /*
1265 * RFC4543 encrypt shared descriptor
1266 * Job Descriptor and Shared Descriptor
1267 * must fit into the 64-word Descriptor h/w Buffer
1268 */
1269 if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
1270 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1271 keys_fit_inline = true;
1272
1273 desc = ctx->sh_desc_enc;
1274
1275 init_sh_desc(desc, HDR_SHARE_SERIAL);
1276
1277 /* Skip key loading if it is loaded due to sharing */
1278 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1279 JUMP_COND_SHRD);
1280 if (keys_fit_inline)
1281 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1282 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1283 else
1284 append_key(desc, ctx->key_dma, ctx->enckeylen,
1285 CLASS_1 | KEY_DEST_CLASS_REG);
1286 set_jump_tgt_here(desc, key_jump_cmd);
1287
1288 /* Class 1 operation */
1289 append_operation(desc, ctx->class1_alg_type |
1290 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1291
1292 /* Load AES-GMAC ESP IV into Math1 register */
1293 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
1294 LDST_CLASS_DECO | tfm->ivsize);
1295
 1296	/* Wait for the DMA transaction to finish */
1297 append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
1298 (1 << JUMP_OFFSET_SHIFT));
1299
1300 /* Overwrite blank immediate AES-GMAC ESP IV data */
1301 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1302 (tfm->ivsize << MOVE_LEN_SHIFT));
1303
1304 /* Overwrite blank immediate AAD data */
1305 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1306 (tfm->ivsize << MOVE_LEN_SHIFT));
1307
1308 /* cryptlen = seqoutlen - authsize */
1309 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1310
1311 /* assoclen = (seqinlen - ivsize) - cryptlen */
1312 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1313
1314 /* Read Salt and AES-GMAC ESP IV */
1315 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1316 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1317 /* Append Salt */
1318 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1319 set_move_tgt_here(desc, write_iv_cmd);
1320 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1321 append_cmd(desc, 0x00000000);
1322 append_cmd(desc, 0x00000000);
1323 /* End of blank commands */
1324
1325 /* Read assoc data */
1326 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1327 FIFOLD_TYPE_AAD);
1328
1329 /* Will read cryptlen bytes */
1330 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
1331
1332 /* Will write cryptlen bytes */
1333 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1334
1335 /*
1336 * MOVE_LEN opcode is not available in all SEC HW revisions,
1337 * thus need to do some magic, i.e. self-patch the descriptor
1338 * buffer.
1339 */
1340 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1341 (0x6 << MOVE_LEN_SHIFT));
1342 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1343 (0x8 << MOVE_LEN_SHIFT));
1344
1345 /* Authenticate AES-GMAC ESP IV */
1346 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1347 FIFOLD_TYPE_AAD | tfm->ivsize);
1348 set_move_tgt_here(desc, write_aad_cmd);
1349 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1350 append_cmd(desc, 0x00000000);
1351 append_cmd(desc, 0x00000000);
1352 /* End of blank commands */
1353
1354 /* Read and write cryptlen bytes */
1355 aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1356
1357 set_move_tgt_here(desc, read_move_cmd);
1358 set_move_tgt_here(desc, write_move_cmd);
1359 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1360 /* Move payload data to OFIFO */
1361 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1362
1363 /* Write ICV */
1364 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1365 LDST_SRCDST_BYTE_CONTEXT);
1366
1367 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1368 desc_bytes(desc),
1369 DMA_TO_DEVICE);
1370 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1371 dev_err(jrdev, "unable to map shared descriptor\n");
1372 return -ENOMEM;
1373 }
1374#ifdef DEBUG
1375 print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1376 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1377 desc_bytes(desc), 1);
1378#endif
1379
1380 /*
1381 * Job Descriptor and Shared Descriptors
1382 * must all fit into the 64-word Descriptor h/w Buffer
1383 */
1384 keys_fit_inline = false;
1385 if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
1386 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1387 keys_fit_inline = true;
1388
1389 desc = ctx->sh_desc_dec;
1390
1391 init_sh_desc(desc, HDR_SHARE_SERIAL);
1392
1393 /* Skip key loading if it is loaded due to sharing */
1394 key_jump_cmd = append_jump(desc, JUMP_JSL |
1395 JUMP_TEST_ALL | JUMP_COND_SHRD);
1396 if (keys_fit_inline)
1397 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1398 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1399 else
1400 append_key(desc, ctx->key_dma, ctx->enckeylen,
1401 CLASS_1 | KEY_DEST_CLASS_REG);
1402 set_jump_tgt_here(desc, key_jump_cmd);
1403
1404 /* Class 1 operation */
1405 append_operation(desc, ctx->class1_alg_type |
1406 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1407
1408 /* Load AES-GMAC ESP IV into Math1 register */
1409 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
1410 LDST_CLASS_DECO | tfm->ivsize);
1411
 1412	/* Wait for the DMA transaction to finish */
1413 append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
1414 (1 << JUMP_OFFSET_SHIFT));
1415
1416 /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
1417 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
1418
1419 /* Overwrite blank immediate AES-GMAC ESP IV data */
1420 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1421 (tfm->ivsize << MOVE_LEN_SHIFT));
1422
1423 /* Overwrite blank immediate AAD data */
1424 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1425 (tfm->ivsize << MOVE_LEN_SHIFT));
1426
1427 /* assoclen = (assoclen + cryptlen) - cryptlen */
1428 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1429 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
1430
1431 /*
1432 * MOVE_LEN opcode is not available in all SEC HW revisions,
1433 * thus need to do some magic, i.e. self-patch the descriptor
1434 * buffer.
1435 */
1436 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1437 (0x6 << MOVE_LEN_SHIFT));
1438 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1439 (0x8 << MOVE_LEN_SHIFT));
1440
1441 /* Read Salt and AES-GMAC ESP IV */
1442 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1443 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1444 /* Append Salt */
1445 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1446 set_move_tgt_here(desc, write_iv_cmd);
1447 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1448 append_cmd(desc, 0x00000000);
1449 append_cmd(desc, 0x00000000);
1450 /* End of blank commands */
1451
1452 /* Read assoc data */
1453 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1454 FIFOLD_TYPE_AAD);
1455
1456 /* Will read cryptlen bytes */
1457 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
1458
1459 /* Will write cryptlen bytes */
1460 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
1461
1462 /* Authenticate AES-GMAC ESP IV */
1463 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1464 FIFOLD_TYPE_AAD | tfm->ivsize);
1465 set_move_tgt_here(desc, write_aad_cmd);
1466 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1467 append_cmd(desc, 0x00000000);
1468 append_cmd(desc, 0x00000000);
1469 /* End of blank commands */
1470
1471 /* Store payload data */
1472 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1473
1474 /* In-snoop cryptlen data */
1475 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1476 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1477
1478 set_move_tgt_here(desc, read_move_cmd);
1479 set_move_tgt_here(desc, write_move_cmd);
1480 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1481 /* Move payload data to OFIFO */
1482 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1483 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1484
1485 /* Read ICV */
1486 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1487 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1488
1489 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1490 desc_bytes(desc),
1491 DMA_TO_DEVICE);
1492 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1493 dev_err(jrdev, "unable to map shared descriptor\n");
1494 return -ENOMEM;
1495 }
1496#ifdef DEBUG
1497 print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1498 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1499 desc_bytes(desc), 1);
1500#endif
1501
1502 /*
1503 * Job Descriptor and Shared Descriptors
1504 * must all fit into the 64-word Descriptor h/w Buffer
1505 */
1506 keys_fit_inline = false;
1507 if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
1508 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1509 keys_fit_inline = true;
1510
1511 /* rfc4543_givencrypt shared descriptor */
1512 desc = ctx->sh_desc_givenc;
1513
1514 init_sh_desc(desc, HDR_SHARE_SERIAL);
1515
1516 /* Skip key loading if it is loaded due to sharing */
1517 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1518 JUMP_COND_SHRD);
1519 if (keys_fit_inline)
1520 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1521 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1522 else
1523 append_key(desc, ctx->key_dma, ctx->enckeylen,
1524 CLASS_1 | KEY_DEST_CLASS_REG);
1525 set_jump_tgt_here(desc, key_jump_cmd);
1526
1527 /* Generate IV */
1528 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1529 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1530 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
1531 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1532 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1533 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1534 /* Move generated IV to Math1 register */
1535 append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
1536 (tfm->ivsize << MOVE_LEN_SHIFT));
1537 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1538
1539 /* Overwrite blank immediate AES-GMAC IV data */
1540 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1541 (tfm->ivsize << MOVE_LEN_SHIFT));
1542
1543 /* Overwrite blank immediate AAD data */
1544 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1545 (tfm->ivsize << MOVE_LEN_SHIFT));
1546
1547 /* Copy generated IV to OFIFO */
1548 append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
1549 (tfm->ivsize << MOVE_LEN_SHIFT));
1550
1551 /* Class 1 operation */
1552 append_operation(desc, ctx->class1_alg_type |
1553 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1554
1555 /* ivsize + cryptlen = seqoutlen - authsize */
1556 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1557
1558 /* assoclen = seqinlen - (ivsize + cryptlen) */
1559 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1560
1561 /* Will write ivsize + cryptlen */
1562 append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
1563
1564 /*
1565 * MOVE_LEN opcode is not available in all SEC HW revisions,
1566 * thus need to do some magic, i.e. self-patch the descriptor
1567 * buffer.
1568 */
1569 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1570 (0x6 << MOVE_LEN_SHIFT));
1571 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1572 (0x8 << MOVE_LEN_SHIFT));
1573
1574 /* Read Salt and AES-GMAC generated IV */
1575 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1576 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1577 /* Append Salt */
1578 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1579 set_move_tgt_here(desc, write_iv_cmd);
1580 /* Blank commands. Will be overwritten by AES-GMAC generated IV. */
1581 append_cmd(desc, 0x00000000);
1582 append_cmd(desc, 0x00000000);
1583 /* End of blank commands */
1584
1585 /* No need to reload iv */
1586 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
1587
1588 /* Read assoc data */
1589 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1590 FIFOLD_TYPE_AAD);
1591
1592 /* Will read cryptlen */
1593 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1594
1595 /* Authenticate AES-GMAC IV */
1596 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1597 FIFOLD_TYPE_AAD | tfm->ivsize);
1598 set_move_tgt_here(desc, write_aad_cmd);
1599 /* Blank commands. Will be overwritten by AES-GMAC IV. */
1600 append_cmd(desc, 0x00000000);
1601 append_cmd(desc, 0x00000000);
1602 /* End of blank commands */
1603
1604 /* Read and write cryptlen bytes */
1605 aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1606
1607 set_move_tgt_here(desc, read_move_cmd);
1608 set_move_tgt_here(desc, write_move_cmd);
1609 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1610 /* Move payload data to OFIFO */
1611 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1612
1613 /* Write ICV */
1614 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1615 LDST_SRCDST_BYTE_CONTEXT);
1616
1617 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1618 desc_bytes(desc),
1619 DMA_TO_DEVICE);
1620 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1621 dev_err(jrdev, "unable to map shared descriptor\n");
1622 return -ENOMEM;
1623 }
1624#ifdef DEBUG
1625 print_hex_dump(KERN_ERR,
1626 "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
1627 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1628 desc_bytes(desc), 1);
1629#endif
1630
1631 return 0;
1632}
1633
1634static int rfc4543_setauthsize(struct crypto_aead *authenc,
1635 unsigned int authsize)
1636{
1637 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1638
1639 ctx->authsize = authsize;
1640 rfc4543_set_sh_desc(authenc);
1641
1642 return 0;
1643}
1644
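As the rfc4543 descriptors above spell out, GMAC is authentication-only: the payload is in-snooped, so it is copied to the output unchanged while also being hashed as AAD together with the assoc data and the ESP IV. A rough standalone sketch of those length relationships, not from the patch:

#include <stddef.h>

struct rfc4543_req {
	size_t assoclen;
	size_t ivsize;		/* 8-byte ESP IV read into MATH1 above */
	size_t cryptlen;
	size_t authsize;
};

/* bytes fed to the hash: assoc data, then the IV, then the payload */
static size_t rfc4543_authenticated_bytes(const struct rfc4543_req *r)
{
	return r->assoclen + r->ivsize + r->cryptlen;
}

/* bytes written out on encrypt: pass-through payload followed by the ICV */
static size_t rfc4543_output_bytes(const struct rfc4543_req *r)
{
	return r->cryptlen + r->authsize;
}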
633static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, 1645static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
634 u32 authkeylen) 1646 u32 authkeylen)
635{ 1647{
@@ -703,20 +1715,154 @@ badkey:
703 return -EINVAL; 1715 return -EINVAL;
704} 1716}
705 1717
1718static int gcm_setkey(struct crypto_aead *aead,
1719 const u8 *key, unsigned int keylen)
1720{
1721 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1722 struct device *jrdev = ctx->jrdev;
1723 int ret = 0;
1724
1725#ifdef DEBUG
1726 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1727 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1728#endif
1729
1730 memcpy(ctx->key, key, keylen);
1731 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1732 DMA_TO_DEVICE);
1733 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1734 dev_err(jrdev, "unable to map key i/o memory\n");
1735 return -ENOMEM;
1736 }
1737 ctx->enckeylen = keylen;
1738
1739 ret = gcm_set_sh_desc(aead);
1740 if (ret) {
1741 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1742 DMA_TO_DEVICE);
1743 }
1744
1745 return ret;
1746}
1747
1748static int rfc4106_setkey(struct crypto_aead *aead,
1749 const u8 *key, unsigned int keylen)
1750{
1751 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1752 struct device *jrdev = ctx->jrdev;
1753 int ret = 0;
1754
1755 if (keylen < 4)
1756 return -EINVAL;
1757
1758#ifdef DEBUG
1759 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1760 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1761#endif
1762
1763 memcpy(ctx->key, key, keylen);
1764
1765 /*
1766 * The last four bytes of the key material are used as the salt value
1767 * in the nonce. Update the AES key length.
1768 */
1769 ctx->enckeylen = keylen - 4;
1770
1771 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1772 DMA_TO_DEVICE);
1773 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1774 dev_err(jrdev, "unable to map key i/o memory\n");
1775 return -ENOMEM;
1776 }
1777
1778 ret = rfc4106_set_sh_desc(aead);
1779 if (ret) {
1780 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1781 DMA_TO_DEVICE);
1782 }
1783
1784 return ret;
1785}
1786
1787static int rfc4543_setkey(struct crypto_aead *aead,
1788 const u8 *key, unsigned int keylen)
1789{
1790 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1791 struct device *jrdev = ctx->jrdev;
1792 int ret = 0;
1793
1794 if (keylen < 4)
1795 return -EINVAL;
1796
1797#ifdef DEBUG
1798 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1799 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1800#endif
1801
1802 memcpy(ctx->key, key, keylen);
1803
1804 /*
1805 * The last four bytes of the key material are used as the salt value
1806 * in the nonce. Update the AES key length.
1807 */
1808 ctx->enckeylen = keylen - 4;
1809
1810 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1811 DMA_TO_DEVICE);
1812 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1813 dev_err(jrdev, "unable to map key i/o memory\n");
1814 return -ENOMEM;
1815 }
1816
1817 ret = rfc4543_set_sh_desc(aead);
1818 if (ret) {
1819 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1820 DMA_TO_DEVICE);
1821 }
1822
1823 return ret;
1824}
1825
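gcm_setkey() above programs the whole blob as the AES key, while rfc4106_setkey()/rfc4543_setkey() treat the last 4 bytes as the salt and DMA-map only the AES-key part. A standalone sketch of that layout, using a hypothetical helper that is not part of the patch:

#include <stddef.h>

struct rfc4106_key_view {
	const unsigned char *aes_key;	/* what gets DMA-mapped as the cipher key */
	size_t aes_keylen;		/* keylen - 4 */
	const unsigned char *salt;	/* trailing 4 bytes, loaded before the IV */
};

static int rfc4106_split_key(struct rfc4106_key_view *v,
			     const unsigned char *key, size_t keylen)
{
	if (keylen < 4)
		return -1;	/* mirrors the -EINVAL check above */
	v->aes_key = key;
	v->aes_keylen = keylen - 4;
	v->salt = key + v->aes_keylen;
	return 0;
}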
706static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 1826static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
707 const u8 *key, unsigned int keylen) 1827 const u8 *key, unsigned int keylen)
708{ 1828{
709 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1829 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
710 struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher; 1830 struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1831 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1832 const char *alg_name = crypto_tfm_alg_name(tfm);
711 struct device *jrdev = ctx->jrdev; 1833 struct device *jrdev = ctx->jrdev;
712 int ret = 0; 1834 int ret = 0;
713 u32 *key_jump_cmd; 1835 u32 *key_jump_cmd;
714 u32 *desc; 1836 u32 *desc;
1837 u32 *nonce;
1838 u32 geniv;
1839 u32 ctx1_iv_off = 0;
1840 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1841 OP_ALG_AAI_CTR_MOD128);
1842 const bool is_rfc3686 = (ctr_mode &&
1843 (strstr(alg_name, "rfc3686") != NULL));
715 1844
716#ifdef DEBUG 1845#ifdef DEBUG
717 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 1846 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
718 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 1847 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
719#endif 1848#endif
1849 /*
1850 * AES-CTR needs to load IV in CONTEXT1 reg
 1851	 * at an offset of 128 bits (16 bytes)
1852 * CONTEXT1[255:128] = IV
1853 */
1854 if (ctr_mode)
1855 ctx1_iv_off = 16;
1856
1857 /*
1858 * RFC3686 specific:
1859 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1860 * | *key = {KEY, NONCE}
1861 */
1862 if (is_rfc3686) {
1863 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1864 keylen -= CTR_RFC3686_NONCE_SIZE;
1865 }
720 1866
721 memcpy(ctx->key, key, keylen); 1867 memcpy(ctx->key, key, keylen);
722 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, 1868 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
@@ -729,7 +1875,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
729 1875
730 /* ablkcipher_encrypt shared descriptor */ 1876 /* ablkcipher_encrypt shared descriptor */
731 desc = ctx->sh_desc_enc; 1877 desc = ctx->sh_desc_enc;
732 init_sh_desc(desc, HDR_SHARE_SERIAL); 1878 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
733 /* Skip if already shared */ 1879 /* Skip if already shared */
734 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 1880 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
735 JUMP_COND_SHRD); 1881 JUMP_COND_SHRD);
@@ -739,11 +1885,31 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
739 ctx->enckeylen, CLASS_1 | 1885 ctx->enckeylen, CLASS_1 |
740 KEY_DEST_CLASS_REG); 1886 KEY_DEST_CLASS_REG);
741 1887
1888 /* Load nonce into CONTEXT1 reg */
1889 if (is_rfc3686) {
1890 nonce = (u32 *)(key + keylen);
1891 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1892 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1893 append_move(desc, MOVE_WAITCOMP |
1894 MOVE_SRC_OUTFIFO |
1895 MOVE_DEST_CLASS1CTX |
1896 (16 << MOVE_OFFSET_SHIFT) |
1897 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1898 }
1899
742 set_jump_tgt_here(desc, key_jump_cmd); 1900 set_jump_tgt_here(desc, key_jump_cmd);
743 1901
744 /* Load iv */ 1902 /* Load iv */
745 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 1903 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
746 LDST_CLASS_1_CCB | tfm->ivsize); 1904 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1905
1906 /* Load counter into CONTEXT1 reg */
1907 if (is_rfc3686)
1908 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1909 LDST_CLASS_1_CCB |
1910 LDST_SRCDST_BYTE_CONTEXT |
1911 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1912 LDST_OFFSET_SHIFT));
747 1913
748 /* Load operation */ 1914 /* Load operation */
749 append_operation(desc, ctx->class1_alg_type | 1915 append_operation(desc, ctx->class1_alg_type |
@@ -768,7 +1934,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
768 /* ablkcipher_decrypt shared descriptor */ 1934 /* ablkcipher_decrypt shared descriptor */
769 desc = ctx->sh_desc_dec; 1935 desc = ctx->sh_desc_dec;
770 1936
771 init_sh_desc(desc, HDR_SHARE_SERIAL); 1937 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
772 /* Skip if already shared */ 1938 /* Skip if already shared */
773 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 1939 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
774 JUMP_COND_SHRD); 1940 JUMP_COND_SHRD);
@@ -778,14 +1944,38 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
778 ctx->enckeylen, CLASS_1 | 1944 ctx->enckeylen, CLASS_1 |
779 KEY_DEST_CLASS_REG); 1945 KEY_DEST_CLASS_REG);
780 1946
1947 /* Load nonce into CONTEXT1 reg */
1948 if (is_rfc3686) {
1949 nonce = (u32 *)(key + keylen);
1950 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1951 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1952 append_move(desc, MOVE_WAITCOMP |
1953 MOVE_SRC_OUTFIFO |
1954 MOVE_DEST_CLASS1CTX |
1955 (16 << MOVE_OFFSET_SHIFT) |
1956 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1957 }
1958
781 set_jump_tgt_here(desc, key_jump_cmd); 1959 set_jump_tgt_here(desc, key_jump_cmd);
782 1960
783 /* load IV */ 1961 /* load IV */
784 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 1962 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
785 LDST_CLASS_1_CCB | tfm->ivsize); 1963 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1964
1965 /* Load counter into CONTEXT1 reg */
1966 if (is_rfc3686)
1967 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1968 LDST_CLASS_1_CCB |
1969 LDST_SRCDST_BYTE_CONTEXT |
1970 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1971 LDST_OFFSET_SHIFT));
786 1972
787 /* Choose operation */ 1973 /* Choose operation */
788 append_dec_op1(desc, ctx->class1_alg_type); 1974 if (ctr_mode)
1975 append_operation(desc, ctx->class1_alg_type |
1976 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1977 else
1978 append_dec_op1(desc, ctx->class1_alg_type);
789 1979
790 /* Perform operation */ 1980 /* Perform operation */
791 ablkcipher_append_src_dst(desc); 1981 ablkcipher_append_src_dst(desc);
@@ -804,6 +1994,83 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
804 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1994 DUMP_PREFIX_ADDRESS, 16, 4, desc,
805 desc_bytes(desc), 1); 1995 desc_bytes(desc), 1);
806#endif 1996#endif
1997 /* ablkcipher_givencrypt shared descriptor */
1998 desc = ctx->sh_desc_givenc;
1999
2000 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
2001 /* Skip if already shared */
2002 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2003 JUMP_COND_SHRD);
2004
2005 /* Load class1 key only */
2006 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
2007 ctx->enckeylen, CLASS_1 |
2008 KEY_DEST_CLASS_REG);
2009
2010 /* Load Nonce into CONTEXT1 reg */
2011 if (is_rfc3686) {
2012 nonce = (u32 *)(key + keylen);
2013 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
2014 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
2015 append_move(desc, MOVE_WAITCOMP |
2016 MOVE_SRC_OUTFIFO |
2017 MOVE_DEST_CLASS1CTX |
2018 (16 << MOVE_OFFSET_SHIFT) |
2019 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
2020 }
2021 set_jump_tgt_here(desc, key_jump_cmd);
2022
2023 /* Generate IV */
2024 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
2025 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
2026 NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
2027 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
2028 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2029 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
2030 append_move(desc, MOVE_WAITCOMP |
2031 MOVE_SRC_INFIFO |
2032 MOVE_DEST_CLASS1CTX |
2033 (crt->ivsize << MOVE_LEN_SHIFT) |
2034 (ctx1_iv_off << MOVE_OFFSET_SHIFT));
2035 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
2036
2037 /* Copy generated IV to memory */
2038 append_seq_store(desc, crt->ivsize,
2039 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
2040 (ctx1_iv_off << LDST_OFFSET_SHIFT));
2041
2042 /* Load Counter into CONTEXT1 reg */
2043 if (is_rfc3686)
2044 append_load_imm_u32(desc, (u32)1, LDST_IMM |
2045 LDST_CLASS_1_CCB |
2046 LDST_SRCDST_BYTE_CONTEXT |
2047 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
2048 LDST_OFFSET_SHIFT));
2049
2050 if (ctx1_iv_off)
2051 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
2052 (1 << JUMP_OFFSET_SHIFT));
2053
2054 /* Load operation */
2055 append_operation(desc, ctx->class1_alg_type |
2056 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
2057
2058 /* Perform operation */
2059 ablkcipher_append_src_dst(desc);
2060
2061 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
2062 desc_bytes(desc),
2063 DMA_TO_DEVICE);
2064 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
2065 dev_err(jrdev, "unable to map shared descriptor\n");
2066 return -ENOMEM;
2067 }
2068#ifdef DEBUG
2069 print_hex_dump(KERN_ERR,
2070 "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
2071 DUMP_PREFIX_ADDRESS, 16, 4, desc,
2072 desc_bytes(desc), 1);
2073#endif
807 2074
808 return ret; 2075 return ret;
809} 2076}
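For rfc3686(ctr(aes)), ablkcipher_setkey() above expects the key material as {AES key, 4-byte nonce}; the trailing nonce is what gets moved into CONTEXT1 at byte offset 16, ahead of the per-request IV. A standalone sketch of that split, using a hypothetical helper that is not part of the patch:

#define CTR_RFC3686_NONCE_SIZE	4	/* mirrors crypto/ctr.h */

struct rfc3686_key_view {
	const unsigned char *aes_key;	/* programmed as the class 1 key */
	unsigned int aes_keylen;	/* keylen - CTR_RFC3686_NONCE_SIZE */
	const unsigned char *nonce;	/* loaded into CONTEXT1[16..19] */
};

static void rfc3686_split_key(struct rfc3686_key_view *v,
			      const unsigned char *key, unsigned int keylen)
{
	v->aes_keylen = keylen - CTR_RFC3686_NONCE_SIZE;
	v->aes_key = key;
	v->nonce = key + v->aes_keylen;
}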
@@ -1088,6 +2355,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1088 u32 out_options = 0, in_options; 2355 u32 out_options = 0, in_options;
1089 dma_addr_t dst_dma, src_dma; 2356 dma_addr_t dst_dma, src_dma;
1090 int len, sec4_sg_index = 0; 2357 int len, sec4_sg_index = 0;
2358 bool is_gcm = false;
1091 2359
1092#ifdef DEBUG 2360#ifdef DEBUG
1093 debug("assoclen %d cryptlen %d authsize %d\n", 2361 debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1106,11 +2374,19 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1106 desc_bytes(sh_desc), 1); 2374 desc_bytes(sh_desc), 1);
1107#endif 2375#endif
1108 2376
2377 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2378 OP_ALG_ALGSEL_AES) &&
2379 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2380 is_gcm = true;
2381
1109 len = desc_len(sh_desc); 2382 len = desc_len(sh_desc);
1110 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 2383 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1111 2384
1112 if (all_contig) { 2385 if (all_contig) {
1113 src_dma = sg_dma_address(req->assoc); 2386 if (is_gcm)
2387 src_dma = edesc->iv_dma;
2388 else
2389 src_dma = sg_dma_address(req->assoc);
1114 in_options = 0; 2390 in_options = 0;
1115 } else { 2391 } else {
1116 src_dma = edesc->sec4_sg_dma; 2392 src_dma = edesc->sec4_sg_dma;
@@ -1164,6 +2440,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1164 u32 out_options = 0, in_options; 2440 u32 out_options = 0, in_options;
1165 dma_addr_t dst_dma, src_dma; 2441 dma_addr_t dst_dma, src_dma;
1166 int len, sec4_sg_index = 0; 2442 int len, sec4_sg_index = 0;
2443 bool is_gcm = false;
1167 2444
1168#ifdef DEBUG 2445#ifdef DEBUG
1169 debug("assoclen %d cryptlen %d authsize %d\n", 2446 debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1181,11 +2458,19 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1181 desc_bytes(sh_desc), 1); 2458 desc_bytes(sh_desc), 1);
1182#endif 2459#endif
1183 2460
2461 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2462 OP_ALG_ALGSEL_AES) &&
2463 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2464 is_gcm = true;
2465
1184 len = desc_len(sh_desc); 2466 len = desc_len(sh_desc);
1185 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 2467 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1186 2468
1187 if (contig & GIV_SRC_CONTIG) { 2469 if (contig & GIV_SRC_CONTIG) {
1188 src_dma = sg_dma_address(req->assoc); 2470 if (is_gcm)
2471 src_dma = edesc->iv_dma;
2472 else
2473 src_dma = sg_dma_address(req->assoc);
1189 in_options = 0; 2474 in_options = 0;
1190 } else { 2475 } else {
1191 src_dma = edesc->sec4_sg_dma; 2476 src_dma = edesc->sec4_sg_dma;
@@ -1200,7 +2485,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1200 } else { 2485 } else {
1201 if (likely(req->src == req->dst)) { 2486 if (likely(req->src == req->dst)) {
1202 dst_dma = src_dma + sizeof(struct sec4_sg_entry) * 2487 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1203 edesc->assoc_nents; 2488 (edesc->assoc_nents +
2489 (is_gcm ? 1 + edesc->src_nents : 0));
1204 out_options = LDST_SGF; 2490 out_options = LDST_SGF;
1205 } else { 2491 } else {
1206 dst_dma = edesc->sec4_sg_dma + 2492 dst_dma = edesc->sec4_sg_dma +
@@ -1272,6 +2558,54 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1272} 2558}
1273 2559
1274/* 2560/*
2561 * Fill in ablkcipher givencrypt job descriptor
2562 */
2563static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2564 struct ablkcipher_edesc *edesc,
2565 struct ablkcipher_request *req,
2566 bool iv_contig)
2567{
2568 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2569 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2570 u32 *desc = edesc->hw_desc;
2571 u32 out_options, in_options;
2572 dma_addr_t dst_dma, src_dma;
2573 int len, sec4_sg_index = 0;
2574
2575#ifdef DEBUG
2576 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2577 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2578 ivsize, 1);
2579 print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
2580 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2581 edesc->src_nents ? 100 : req->nbytes, 1);
2582#endif
2583
2584 len = desc_len(sh_desc);
2585 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2586
2587 if (!edesc->src_nents) {
2588 src_dma = sg_dma_address(req->src);
2589 in_options = 0;
2590 } else {
2591 src_dma = edesc->sec4_sg_dma;
2592 sec4_sg_index += edesc->src_nents;
2593 in_options = LDST_SGF;
2594 }
2595 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2596
2597 if (iv_contig) {
2598 dst_dma = edesc->iv_dma;
2599 out_options = 0;
2600 } else {
2601 dst_dma = edesc->sec4_sg_dma +
2602 sec4_sg_index * sizeof(struct sec4_sg_entry);
2603 out_options = LDST_SGF;
2604 }
2605 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2606}
2607
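The nbytes + ivsize output length above reflects how the givencrypt shared descriptor is built: the freshly generated IV is stored to the output sequence first and the ciphertext follows. A standalone sketch of that view, assuming the iv_contig case where the IV buffer and the data destination sit back to back (hypothetical helper, not from the patch):

#include <stddef.h>

struct giv_output_view {
	unsigned char *iv;		/* dst[0 .. ivsize): generated IV */
	unsigned char *ciphertext;	/* dst[ivsize .. ivsize + nbytes) */
};

static void giv_output_split(struct giv_output_view *v, unsigned char *dst,
			     size_t ivsize)
{
	v->iv = dst;
	v->ciphertext = dst + ivsize;
}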
2608/*
1275 * allocate and map the aead extended descriptor 2609 * allocate and map the aead extended descriptor
1276 */ 2610 */
1277static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 2611static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -1292,6 +2626,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1292 int ivsize = crypto_aead_ivsize(aead); 2626 int ivsize = crypto_aead_ivsize(aead);
1293 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 2627 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1294 unsigned int authsize = ctx->authsize; 2628 unsigned int authsize = ctx->authsize;
2629 bool is_gcm = false;
1295 2630
1296 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); 2631 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1297 2632
@@ -1326,15 +2661,31 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1326 return ERR_PTR(-ENOMEM); 2661 return ERR_PTR(-ENOMEM);
1327 } 2662 }
1328 2663
1329 /* Check if data are contiguous */ 2664 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
1330 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 2665 OP_ALG_ALGSEL_AES) &&
1331 iv_dma || src_nents || iv_dma + ivsize != 2666 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
1332 sg_dma_address(req->src)) { 2667 is_gcm = true;
1333 all_contig = false; 2668
2669 /*
2670 * Check if data are contiguous.
2671 * GCM expected input sequence: IV, AAD, text
2672 * All other - expected input sequence: AAD, IV, text
2673 */
2674 if (is_gcm)
2675 all_contig = (!assoc_nents &&
2676 iv_dma + ivsize == sg_dma_address(req->assoc) &&
2677 !src_nents && sg_dma_address(req->assoc) +
2678 req->assoclen == sg_dma_address(req->src));
2679 else
2680 all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
2681 req->assoclen == iv_dma && !src_nents &&
2682 iv_dma + ivsize == sg_dma_address(req->src));
2683 if (!all_contig) {
1334 assoc_nents = assoc_nents ? : 1; 2684 assoc_nents = assoc_nents ? : 1;
1335 src_nents = src_nents ? : 1; 2685 src_nents = src_nents ? : 1;
1336 sec4_sg_len = assoc_nents + 1 + src_nents; 2686 sec4_sg_len = assoc_nents + 1 + src_nents;
1337 } 2687 }
2688
1338 sec4_sg_len += dst_nents; 2689 sec4_sg_len += dst_nents;
1339 2690
1340 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 2691 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
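The reworked contiguity check above boils down to a single predicate that only differs in the expected segment order. A standalone restatement, with userspace stand-ins for the kernel types; not part of the patch:

#include <stdbool.h>

typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

struct aead_dma_layout {
	dma_addr_t assoc, iv, src;	/* bus addresses of the three parts */
	unsigned int assoclen, ivsize;
	bool assoc_single_seg, src_single_seg;
};

/* GCM expects IV, AAD, text; everything else expects AAD, IV, text */
static bool aead_all_contig(const struct aead_dma_layout *l, bool is_gcm)
{
	if (!l->assoc_single_seg || !l->src_single_seg)
		return false;
	if (is_gcm)
		return l->iv + l->ivsize == l->assoc &&
		       l->assoc + l->assoclen == l->src;
	return l->assoc + l->assoclen == l->iv &&
	       l->iv + l->ivsize == l->src;
}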
@@ -1361,14 +2712,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1361 2712
1362 sec4_sg_index = 0; 2713 sec4_sg_index = 0;
1363 if (!all_contig) { 2714 if (!all_contig) {
1364 sg_to_sec4_sg(req->assoc, 2715 if (!is_gcm) {
1365 (assoc_nents ? : 1), 2716 sg_to_sec4_sg(req->assoc,
1366 edesc->sec4_sg + 2717 (assoc_nents ? : 1),
1367 sec4_sg_index, 0); 2718 edesc->sec4_sg +
1368 sec4_sg_index += assoc_nents ? : 1; 2719 sec4_sg_index, 0);
2720 sec4_sg_index += assoc_nents ? : 1;
2721 }
2722
1369 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 2723 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1370 iv_dma, ivsize, 0); 2724 iv_dma, ivsize, 0);
1371 sec4_sg_index += 1; 2725 sec4_sg_index += 1;
2726
2727 if (is_gcm) {
2728 sg_to_sec4_sg(req->assoc,
2729 (assoc_nents ? : 1),
2730 edesc->sec4_sg +
2731 sec4_sg_index, 0);
2732 sec4_sg_index += assoc_nents ? : 1;
2733 }
2734
1372 sg_to_sec4_sg_last(req->src, 2735 sg_to_sec4_sg_last(req->src,
1373 (src_nents ? : 1), 2736 (src_nents ? : 1),
1374 edesc->sec4_sg + 2737 edesc->sec4_sg +
@@ -1490,6 +2853,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1490 int ivsize = crypto_aead_ivsize(aead); 2853 int ivsize = crypto_aead_ivsize(aead);
1491 bool assoc_chained = false, src_chained = false, dst_chained = false; 2854 bool assoc_chained = false, src_chained = false, dst_chained = false;
1492 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 2855 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2856 bool is_gcm = false;
1493 2857
1494 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); 2858 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1495 src_nents = sg_count(req->src, req->cryptlen, &src_chained); 2859 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
@@ -1516,24 +2880,53 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1516 return ERR_PTR(-ENOMEM); 2880 return ERR_PTR(-ENOMEM);
1517 } 2881 }
1518 2882
1519 /* Check if data are contiguous */ 2883 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
1520 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 2884 OP_ALG_ALGSEL_AES) &&
1521 iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) 2885 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
1522 contig &= ~GIV_SRC_CONTIG; 2886 is_gcm = true;
2887
2888 /*
2889 * Check if data are contiguous.
2890 * GCM expected input sequence: IV, AAD, text
2891 * All other - expected input sequence: AAD, IV, text
2892 */
2893
2894 if (is_gcm) {
2895 if (assoc_nents || iv_dma + ivsize !=
2896 sg_dma_address(req->assoc) || src_nents ||
2897 sg_dma_address(req->assoc) + req->assoclen !=
2898 sg_dma_address(req->src))
2899 contig &= ~GIV_SRC_CONTIG;
2900 } else {
2901 if (assoc_nents ||
2902 sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
2903 src_nents || iv_dma + ivsize != sg_dma_address(req->src))
2904 contig &= ~GIV_SRC_CONTIG;
2905 }
2906
1523 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) 2907 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1524 contig &= ~GIV_DST_CONTIG; 2908 contig &= ~GIV_DST_CONTIG;
1525 if (unlikely(req->src != req->dst)) { 2909
1526 dst_nents = dst_nents ? : 1;
1527 sec4_sg_len += 1;
1528 }
1529 if (!(contig & GIV_SRC_CONTIG)) { 2910 if (!(contig & GIV_SRC_CONTIG)) {
1530 assoc_nents = assoc_nents ? : 1; 2911 assoc_nents = assoc_nents ? : 1;
1531 src_nents = src_nents ? : 1; 2912 src_nents = src_nents ? : 1;
1532 sec4_sg_len += assoc_nents + 1 + src_nents; 2913 sec4_sg_len += assoc_nents + 1 + src_nents;
1533 if (likely(req->src == req->dst)) 2914 if (req->src == req->dst &&
2915 (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
1534 contig &= ~GIV_DST_CONTIG; 2916 contig &= ~GIV_DST_CONTIG;
1535 } 2917 }
1536 sec4_sg_len += dst_nents; 2918
2919 /*
2920 * Add new sg entries for GCM output sequence.
2921 * Expected output sequence: IV, encrypted text.
2922 */
2923 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
2924 sec4_sg_len += 1 + src_nents;
2925
2926 if (unlikely(req->src != req->dst)) {
2927 dst_nents = dst_nents ? : 1;
2928 sec4_sg_len += 1 + dst_nents;
2929 }
1537 2930
1538 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 2931 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1539 2932
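
The reworked accounting in this hunk sizes the S/G table from three sources: AAD + IV + source when the input is not contiguous, an extra IV + source run when GCM encrypts in place, and IV + destination when the output is a separate buffer. A rough model of that count, with the contig bit layout and helper name assumed for illustration:

#include <stdio.h>
#include <stdbool.h>

#define GIV_SRC_CONTIG 1
#define GIV_DST_CONTIG 2

/*
 * Rough model of the sec4_sg_len accounting in aead_giv_edesc_alloc()
 * after this patch.  'contig' carries the two contiguity bits; nents
 * counts follow the ?: 1 convention used by the driver.
 */
static int giv_sec4_sg_len(bool is_gcm, bool in_place, int contig,
			   int assoc_nents, int src_nents, int dst_nents)
{
	int len = 0;

	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? assoc_nents : 1;
		src_nents = src_nents ? src_nents : 1;
		len += assoc_nents + 1 + src_nents;	/* AAD, IV, text */
	}

	/* GCM in-place output: IV followed by the encrypted text again */
	if (is_gcm && in_place && !(contig & GIV_DST_CONTIG))
		len += 1 + src_nents;

	/* separate destination: IV plus the destination scatterlist */
	if (!in_place) {
		dst_nents = dst_nents ? dst_nents : 1;
		len += 1 + dst_nents;
	}

	return len;
}

int main(void)
{
	printf("gcm, in place, nothing contiguous: %d entries\n",
	       giv_sec4_sg_len(true, true, 0, 1, 2, 0));
	return 0;
}
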
@@ -1559,18 +2952,36 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1559 2952
1560 sec4_sg_index = 0; 2953 sec4_sg_index = 0;
1561 if (!(contig & GIV_SRC_CONTIG)) { 2954 if (!(contig & GIV_SRC_CONTIG)) {
1562 sg_to_sec4_sg(req->assoc, assoc_nents, 2955 if (!is_gcm) {
1563 edesc->sec4_sg + 2956 sg_to_sec4_sg(req->assoc, assoc_nents,
1564 sec4_sg_index, 0); 2957 edesc->sec4_sg + sec4_sg_index, 0);
1565 sec4_sg_index += assoc_nents; 2958 sec4_sg_index += assoc_nents;
2959 }
2960
1566 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 2961 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1567 iv_dma, ivsize, 0); 2962 iv_dma, ivsize, 0);
1568 sec4_sg_index += 1; 2963 sec4_sg_index += 1;
2964
2965 if (is_gcm) {
2966 sg_to_sec4_sg(req->assoc, assoc_nents,
2967 edesc->sec4_sg + sec4_sg_index, 0);
2968 sec4_sg_index += assoc_nents;
2969 }
2970
1569 sg_to_sec4_sg_last(req->src, src_nents, 2971 sg_to_sec4_sg_last(req->src, src_nents,
1570 edesc->sec4_sg + 2972 edesc->sec4_sg +
1571 sec4_sg_index, 0); 2973 sec4_sg_index, 0);
1572 sec4_sg_index += src_nents; 2974 sec4_sg_index += src_nents;
1573 } 2975 }
2976
2977 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
2978 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2979 iv_dma, ivsize, 0);
2980 sec4_sg_index += 1;
2981 sg_to_sec4_sg_last(req->src, src_nents,
2982 edesc->sec4_sg + sec4_sg_index, 0);
2983 }
2984
1574 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { 2985 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1575 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 2986 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1576 iv_dma, ivsize, 0); 2987 iv_dma, ivsize, 0);
@@ -1814,6 +3225,151 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
1814 return ret; 3225 return ret;
1815} 3226}
1816 3227
3228/*
3229 * allocate and map the ablkcipher extended descriptor
3230 * for ablkcipher givencrypt
3231 */
3232static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
3233 struct skcipher_givcrypt_request *greq,
3234 int desc_bytes,
3235 bool *iv_contig_out)
3236{
3237 struct ablkcipher_request *req = &greq->creq;
3238 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3239 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3240 struct device *jrdev = ctx->jrdev;
3241 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
3242 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
3243 GFP_KERNEL : GFP_ATOMIC;
3244 int src_nents, dst_nents = 0, sec4_sg_bytes;
3245 struct ablkcipher_edesc *edesc;
3246 dma_addr_t iv_dma = 0;
3247 bool iv_contig = false;
3248 int sgc;
3249 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
3250 bool src_chained = false, dst_chained = false;
3251 int sec4_sg_index;
3252
3253 src_nents = sg_count(req->src, req->nbytes, &src_chained);
3254
3255 if (unlikely(req->dst != req->src))
3256 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
3257
3258 if (likely(req->src == req->dst)) {
3259 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3260 DMA_BIDIRECTIONAL, src_chained);
3261 } else {
3262 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3263 DMA_TO_DEVICE, src_chained);
3264 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
3265 DMA_FROM_DEVICE, dst_chained);
3266 }
3267
3268 /*
 3270	 * Check if the IV can be contiguous with the destination.
 3271	 * If so, include it. If not, create a scatterlist.
3271 */
3272 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
3273 if (dma_mapping_error(jrdev, iv_dma)) {
3274 dev_err(jrdev, "unable to map IV\n");
3275 return ERR_PTR(-ENOMEM);
3276 }
3277
3278 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
3279 iv_contig = true;
3280 else
3281 dst_nents = dst_nents ? : 1;
3282 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
3283 sizeof(struct sec4_sg_entry);
3284
3285 /* allocate space for base edesc and hw desc commands, link tables */
3286 edesc = kmalloc(sizeof(*edesc) + desc_bytes +
3287 sec4_sg_bytes, GFP_DMA | flags);
3288 if (!edesc) {
3289 dev_err(jrdev, "could not allocate extended descriptor\n");
3290 return ERR_PTR(-ENOMEM);
3291 }
3292
3293 edesc->src_nents = src_nents;
3294 edesc->src_chained = src_chained;
3295 edesc->dst_nents = dst_nents;
3296 edesc->dst_chained = dst_chained;
3297 edesc->sec4_sg_bytes = sec4_sg_bytes;
3298 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
3299 desc_bytes;
3300
3301 sec4_sg_index = 0;
3302 if (src_nents) {
3303 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
3304 sec4_sg_index += src_nents;
3305 }
3306
3307 if (!iv_contig) {
3308 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
3309 iv_dma, ivsize, 0);
3310 sec4_sg_index += 1;
3311 sg_to_sec4_sg_last(req->dst, dst_nents,
3312 edesc->sec4_sg + sec4_sg_index, 0);
3313 }
3314
3315 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
3316 sec4_sg_bytes, DMA_TO_DEVICE);
3317 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
3318 dev_err(jrdev, "unable to map S/G table\n");
3319 return ERR_PTR(-ENOMEM);
3320 }
3321 edesc->iv_dma = iv_dma;
3322
3323#ifdef DEBUG
3324 print_hex_dump(KERN_ERR,
3325 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
3326 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
3327 sec4_sg_bytes, 1);
3328#endif
3329
3330 *iv_contig_out = iv_contig;
3331 return edesc;
3332}
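
The new ablkcipher_giv_edesc_alloc() above packs the edesc header, the hardware job descriptor and the sec4 S/G table into one kmalloc'd block, with the table starting right after the descriptor. A sketch of that sizing, using stand-in structure layouts and an assumed descriptor size rather than the driver's real DESC_JOB_IO_LEN * CAAM_CMD_SZ value:

#include <stdio.h>
#include <stddef.h>

/* stand-ins; the real sec4_sg_entry and ablkcipher_edesc differ */
struct sec4_sg_entry { unsigned long long ptr; unsigned int len, bpid_offset; };

struct fake_edesc {
	int src_nents, dst_nents;
	size_t sec4_sg_bytes;
	/* hw descriptor + S/G table follow in the same allocation */
};

int main(void)
{
	int src_nents = 2, dst_nents = 3;
	int iv_contig = 0;		/* IV needs its own table entry */
	size_t desc_bytes = 192;	/* stand-in for DESC_JOB_IO_LEN * CAAM_CMD_SZ */
	size_t sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			       sizeof(struct sec4_sg_entry);
	size_t total = sizeof(struct fake_edesc) + desc_bytes + sec4_sg_bytes;

	printf("S/G table: %zu bytes, whole edesc allocation: %zu bytes\n",
	       sec4_sg_bytes, total);
	printf("sec4_sg offset inside the block: %zu\n",
	       sizeof(struct fake_edesc) + desc_bytes);
	return 0;
}
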
3333
3334static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
3335{
3336 struct ablkcipher_request *req = &creq->creq;
3337 struct ablkcipher_edesc *edesc;
3338 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3339 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3340 struct device *jrdev = ctx->jrdev;
3341 bool iv_contig;
3342 u32 *desc;
3343 int ret = 0;
3344
3345 /* allocate extended descriptor */
3346 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
3347 CAAM_CMD_SZ, &iv_contig);
3348 if (IS_ERR(edesc))
3349 return PTR_ERR(edesc);
3350
 3351	/* Create and submit job descriptor */
3352 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
3353 edesc, req, iv_contig);
3354#ifdef DEBUG
3355 print_hex_dump(KERN_ERR,
3356 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
3357 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3358 desc_bytes(edesc->hw_desc), 1);
3359#endif
3360 desc = edesc->hw_desc;
3361 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
3362
3363 if (!ret) {
3364 ret = -EINPROGRESS;
3365 } else {
3366 ablkcipher_unmap(jrdev, edesc, req);
3367 kfree(edesc);
3368 }
3369
3370 return ret;
3371}
3372
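
ablkcipher_givencrypt() follows the usual job-ring submit pattern: queue the descriptor with a completion callback, report -EINPROGRESS on success, and clean up immediately only on enqueue failure. A user-space sketch of that shape, with the ring API stubbed out:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

typedef void (*done_cb)(void *req, int status);

static int fake_jr_enqueue(const unsigned int *desc, done_cb cb, void *req)
{
	/* pretend the ring accepted the job; the callback fires later */
	(void)desc; (void)cb; (void)req;
	return 0;
}

static void encrypt_done(void *req, int status)
{
	printf("request %p finished, status %d\n", req, status);
	/* real driver: unmap DMA, free the edesc, complete the request */
}

static int submit(unsigned int *desc, void *req)
{
	int ret = fake_jr_enqueue(desc, encrypt_done, req);

	if (!ret)
		return -EINPROGRESS;	/* accepted, completion is async */

	/* error path: the real code unmaps and kfree()s the edesc here */
	return ret;
}

int main(void)
{
	unsigned int *desc = calloc(16, sizeof(*desc));
	int dummy_req;

	printf("submit returned %d (expect %d)\n",
	       submit(desc, &dummy_req), -EINPROGRESS);
	free(desc);
	return 0;
}
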
1817#define template_aead template_u.aead 3373#define template_aead template_u.aead
1818#define template_ablkcipher template_u.ablkcipher 3374#define template_ablkcipher template_u.ablkcipher
1819struct caam_alg_template { 3375struct caam_alg_template {
@@ -2309,17 +3865,188 @@ static struct caam_alg_template driver_algs[] = {
2309 OP_ALG_AAI_HMAC_PRECOMP, 3865 OP_ALG_AAI_HMAC_PRECOMP,
2310 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 3866 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2311 }, 3867 },
3868 {
3869 .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
3870 .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
3871 .blocksize = 1,
3872 .type = CRYPTO_ALG_TYPE_AEAD,
3873 .template_aead = {
3874 .setkey = aead_setkey,
3875 .setauthsize = aead_setauthsize,
3876 .encrypt = aead_encrypt,
3877 .decrypt = aead_decrypt,
3878 .givencrypt = aead_givencrypt,
3879 .geniv = "<built-in>",
3880 .ivsize = CTR_RFC3686_IV_SIZE,
3881 .maxauthsize = MD5_DIGEST_SIZE,
3882 },
3883 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3884 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3885 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3886 },
3887 {
3888 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
3889 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
3890 .blocksize = 1,
3891 .type = CRYPTO_ALG_TYPE_AEAD,
3892 .template_aead = {
3893 .setkey = aead_setkey,
3894 .setauthsize = aead_setauthsize,
3895 .encrypt = aead_encrypt,
3896 .decrypt = aead_decrypt,
3897 .givencrypt = aead_givencrypt,
3898 .geniv = "<built-in>",
3899 .ivsize = CTR_RFC3686_IV_SIZE,
3900 .maxauthsize = SHA1_DIGEST_SIZE,
3901 },
3902 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3903 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3904 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3905 },
3906 {
3907 .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
3908 .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
3909 .blocksize = 1,
3910 .type = CRYPTO_ALG_TYPE_AEAD,
3911 .template_aead = {
3912 .setkey = aead_setkey,
3913 .setauthsize = aead_setauthsize,
3914 .encrypt = aead_encrypt,
3915 .decrypt = aead_decrypt,
3916 .givencrypt = aead_givencrypt,
3917 .geniv = "<built-in>",
3918 .ivsize = CTR_RFC3686_IV_SIZE,
3919 .maxauthsize = SHA224_DIGEST_SIZE,
3920 },
3921 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3922 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3923 OP_ALG_AAI_HMAC_PRECOMP,
3924 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3925 },
3926 {
3927 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
3928 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
3929 .blocksize = 1,
3930 .type = CRYPTO_ALG_TYPE_AEAD,
3931 .template_aead = {
3932 .setkey = aead_setkey,
3933 .setauthsize = aead_setauthsize,
3934 .encrypt = aead_encrypt,
3935 .decrypt = aead_decrypt,
3936 .givencrypt = aead_givencrypt,
3937 .geniv = "<built-in>",
3938 .ivsize = CTR_RFC3686_IV_SIZE,
3939 .maxauthsize = SHA256_DIGEST_SIZE,
3940 },
3941 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3942 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3943 OP_ALG_AAI_HMAC_PRECOMP,
3944 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3945 },
3946 {
3947 .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
3948 .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
3949 .blocksize = 1,
3950 .type = CRYPTO_ALG_TYPE_AEAD,
3951 .template_aead = {
3952 .setkey = aead_setkey,
3953 .setauthsize = aead_setauthsize,
3954 .encrypt = aead_encrypt,
3955 .decrypt = aead_decrypt,
3956 .givencrypt = aead_givencrypt,
3957 .geniv = "<built-in>",
3958 .ivsize = CTR_RFC3686_IV_SIZE,
3959 .maxauthsize = SHA384_DIGEST_SIZE,
3960 },
3961 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3962 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3963 OP_ALG_AAI_HMAC_PRECOMP,
3964 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3965 },
3966 {
3967 .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
3968 .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
3969 .blocksize = 1,
3970 .type = CRYPTO_ALG_TYPE_AEAD,
3971 .template_aead = {
3972 .setkey = aead_setkey,
3973 .setauthsize = aead_setauthsize,
3974 .encrypt = aead_encrypt,
3975 .decrypt = aead_decrypt,
3976 .givencrypt = aead_givencrypt,
3977 .geniv = "<built-in>",
3978 .ivsize = CTR_RFC3686_IV_SIZE,
3979 .maxauthsize = SHA512_DIGEST_SIZE,
3980 },
3981 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3982 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3983 OP_ALG_AAI_HMAC_PRECOMP,
3984 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3985 },
3986 {
3987 .name = "rfc4106(gcm(aes))",
3988 .driver_name = "rfc4106-gcm-aes-caam",
3989 .blocksize = 1,
3990 .type = CRYPTO_ALG_TYPE_AEAD,
3991 .template_aead = {
3992 .setkey = rfc4106_setkey,
3993 .setauthsize = rfc4106_setauthsize,
3994 .encrypt = aead_encrypt,
3995 .decrypt = aead_decrypt,
3996 .givencrypt = aead_givencrypt,
3997 .geniv = "<built-in>",
3998 .ivsize = 8,
3999 .maxauthsize = AES_BLOCK_SIZE,
4000 },
4001 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4002 },
4003 {
4004 .name = "rfc4543(gcm(aes))",
4005 .driver_name = "rfc4543-gcm-aes-caam",
4006 .blocksize = 1,
4007 .type = CRYPTO_ALG_TYPE_AEAD,
4008 .template_aead = {
4009 .setkey = rfc4543_setkey,
4010 .setauthsize = rfc4543_setauthsize,
4011 .encrypt = aead_encrypt,
4012 .decrypt = aead_decrypt,
4013 .givencrypt = aead_givencrypt,
4014 .geniv = "<built-in>",
4015 .ivsize = 8,
4016 .maxauthsize = AES_BLOCK_SIZE,
4017 },
4018 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4019 },
4020 /* Galois Counter Mode */
4021 {
4022 .name = "gcm(aes)",
4023 .driver_name = "gcm-aes-caam",
4024 .blocksize = 1,
4025 .type = CRYPTO_ALG_TYPE_AEAD,
4026 .template_aead = {
4027 .setkey = gcm_setkey,
4028 .setauthsize = gcm_setauthsize,
4029 .encrypt = aead_encrypt,
4030 .decrypt = aead_decrypt,
4031 .givencrypt = NULL,
4032 .geniv = "<built-in>",
4033 .ivsize = 12,
4034 .maxauthsize = AES_BLOCK_SIZE,
4035 },
4036 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4037 },
2312 /* ablkcipher descriptor */ 4038 /* ablkcipher descriptor */
2313 { 4039 {
2314 .name = "cbc(aes)", 4040 .name = "cbc(aes)",
2315 .driver_name = "cbc-aes-caam", 4041 .driver_name = "cbc-aes-caam",
2316 .blocksize = AES_BLOCK_SIZE, 4042 .blocksize = AES_BLOCK_SIZE,
2317 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 4043 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2318 .template_ablkcipher = { 4044 .template_ablkcipher = {
2319 .setkey = ablkcipher_setkey, 4045 .setkey = ablkcipher_setkey,
2320 .encrypt = ablkcipher_encrypt, 4046 .encrypt = ablkcipher_encrypt,
2321 .decrypt = ablkcipher_decrypt, 4047 .decrypt = ablkcipher_decrypt,
2322 .geniv = "eseqiv", 4048 .givencrypt = ablkcipher_givencrypt,
4049 .geniv = "<built-in>",
2323 .min_keysize = AES_MIN_KEY_SIZE, 4050 .min_keysize = AES_MIN_KEY_SIZE,
2324 .max_keysize = AES_MAX_KEY_SIZE, 4051 .max_keysize = AES_MAX_KEY_SIZE,
2325 .ivsize = AES_BLOCK_SIZE, 4052 .ivsize = AES_BLOCK_SIZE,
@@ -2330,12 +4057,13 @@ static struct caam_alg_template driver_algs[] = {
2330 .name = "cbc(des3_ede)", 4057 .name = "cbc(des3_ede)",
2331 .driver_name = "cbc-3des-caam", 4058 .driver_name = "cbc-3des-caam",
2332 .blocksize = DES3_EDE_BLOCK_SIZE, 4059 .blocksize = DES3_EDE_BLOCK_SIZE,
2333 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 4060 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2334 .template_ablkcipher = { 4061 .template_ablkcipher = {
2335 .setkey = ablkcipher_setkey, 4062 .setkey = ablkcipher_setkey,
2336 .encrypt = ablkcipher_encrypt, 4063 .encrypt = ablkcipher_encrypt,
2337 .decrypt = ablkcipher_decrypt, 4064 .decrypt = ablkcipher_decrypt,
2338 .geniv = "eseqiv", 4065 .givencrypt = ablkcipher_givencrypt,
4066 .geniv = "<built-in>",
2339 .min_keysize = DES3_EDE_KEY_SIZE, 4067 .min_keysize = DES3_EDE_KEY_SIZE,
2340 .max_keysize = DES3_EDE_KEY_SIZE, 4068 .max_keysize = DES3_EDE_KEY_SIZE,
2341 .ivsize = DES3_EDE_BLOCK_SIZE, 4069 .ivsize = DES3_EDE_BLOCK_SIZE,
@@ -2346,17 +4074,53 @@ static struct caam_alg_template driver_algs[] = {
2346 .name = "cbc(des)", 4074 .name = "cbc(des)",
2347 .driver_name = "cbc-des-caam", 4075 .driver_name = "cbc-des-caam",
2348 .blocksize = DES_BLOCK_SIZE, 4076 .blocksize = DES_BLOCK_SIZE,
2349 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 4077 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2350 .template_ablkcipher = { 4078 .template_ablkcipher = {
2351 .setkey = ablkcipher_setkey, 4079 .setkey = ablkcipher_setkey,
2352 .encrypt = ablkcipher_encrypt, 4080 .encrypt = ablkcipher_encrypt,
2353 .decrypt = ablkcipher_decrypt, 4081 .decrypt = ablkcipher_decrypt,
2354 .geniv = "eseqiv", 4082 .givencrypt = ablkcipher_givencrypt,
4083 .geniv = "<built-in>",
2355 .min_keysize = DES_KEY_SIZE, 4084 .min_keysize = DES_KEY_SIZE,
2356 .max_keysize = DES_KEY_SIZE, 4085 .max_keysize = DES_KEY_SIZE,
2357 .ivsize = DES_BLOCK_SIZE, 4086 .ivsize = DES_BLOCK_SIZE,
2358 }, 4087 },
2359 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 4088 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4089 },
4090 {
4091 .name = "ctr(aes)",
4092 .driver_name = "ctr-aes-caam",
4093 .blocksize = 1,
4094 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
4095 .template_ablkcipher = {
4096 .setkey = ablkcipher_setkey,
4097 .encrypt = ablkcipher_encrypt,
4098 .decrypt = ablkcipher_decrypt,
4099 .geniv = "chainiv",
4100 .min_keysize = AES_MIN_KEY_SIZE,
4101 .max_keysize = AES_MAX_KEY_SIZE,
4102 .ivsize = AES_BLOCK_SIZE,
4103 },
4104 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
4105 },
4106 {
4107 .name = "rfc3686(ctr(aes))",
4108 .driver_name = "rfc3686-ctr-aes-caam",
4109 .blocksize = 1,
4110 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
4111 .template_ablkcipher = {
4112 .setkey = ablkcipher_setkey,
4113 .encrypt = ablkcipher_encrypt,
4114 .decrypt = ablkcipher_decrypt,
4115 .givencrypt = ablkcipher_givencrypt,
4116 .geniv = "<built-in>",
4117 .min_keysize = AES_MIN_KEY_SIZE +
4118 CTR_RFC3686_NONCE_SIZE,
4119 .max_keysize = AES_MAX_KEY_SIZE +
4120 CTR_RFC3686_NONCE_SIZE,
4121 .ivsize = CTR_RFC3686_IV_SIZE,
4122 },
4123 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2360 } 4124 }
2361}; 4125};
2362 4126
@@ -2457,6 +4221,10 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2457 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | 4221 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2458 template->type; 4222 template->type;
2459 switch (template->type) { 4223 switch (template->type) {
4224 case CRYPTO_ALG_TYPE_GIVCIPHER:
4225 alg->cra_type = &crypto_givcipher_type;
4226 alg->cra_ablkcipher = template->template_ablkcipher;
4227 break;
2460 case CRYPTO_ALG_TYPE_ABLKCIPHER: 4228 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2461 alg->cra_type = &crypto_ablkcipher_type; 4229 alg->cra_type = &crypto_ablkcipher_type;
2462 alg->cra_ablkcipher = template->template_ablkcipher; 4230 alg->cra_ablkcipher = template->template_ablkcipher;
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index f227922cea38..acd7743e2603 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -28,6 +28,7 @@
28#include <crypto/algapi.h> 28#include <crypto/algapi.h>
29#include <crypto/null.h> 29#include <crypto/null.h>
30#include <crypto/aes.h> 30#include <crypto/aes.h>
31#include <crypto/ctr.h>
31#include <crypto/des.h> 32#include <crypto/des.h>
32#include <crypto/sha.h> 33#include <crypto/sha.h>
33#include <crypto/md5.h> 34#include <crypto/md5.h>
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 7eec20bb3849..9f79fd7bd4d7 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -192,6 +192,8 @@ static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
192 PRINT_POS; \ 192 PRINT_POS; \
193 append_cmd(desc, CMD_##op | len | options); \ 193 append_cmd(desc, CMD_##op | len | options); \
194} 194}
195
196APPEND_CMD_LEN(seq_load, SEQ_LOAD)
195APPEND_CMD_LEN(seq_store, SEQ_STORE) 197APPEND_CMD_LEN(seq_store, SEQ_STORE)
196APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD) 198APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
197APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE) 199APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
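
Going by the APPEND_CMD_LEN macro shown in this hunk, the added line generates an append_seq_load() helper that packs one command word. A self-contained sketch of that expansion, with CMD_SEQ_LOAD's value and append_cmd() as stand-ins:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define CMD_SEQ_LOAD	0x10000000u	/* placeholder opcode, not the real value */

static void append_cmd(u32 *desc, u32 word)
{
	/* the real helper grows the descriptor; here desc[0] tracks length */
	desc[++desc[0]] = word;
}

/* what APPEND_CMD_LEN(seq_load, SEQ_LOAD) expands to, minus PRINT_POS */
static inline void append_seq_load(u32 *desc, unsigned int len, u32 options)
{
	append_cmd(desc, CMD_SEQ_LOAD | len | options);
}

int main(void)
{
	u32 desc[16] = { 0 };

	append_seq_load(desc, 16, 0);
	printf("descriptor word 1: 0x%08x\n", desc[1]);
	return 0;
}
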
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 6531054a44c8..66d73bf54166 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -213,27 +213,36 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
213 void (*report_ssed)(struct device *jrdev, const u32 status, 213 void (*report_ssed)(struct device *jrdev, const u32 status,
214 const char *error); 214 const char *error);
215 const char *error; 215 const char *error;
216 } status_src[] = { 216 } status_src[16] = {
217 { NULL, "No error" }, 217 { NULL, "No error" },
218 { NULL, NULL }, 218 { NULL, NULL },
219 { report_ccb_status, "CCB" }, 219 { report_ccb_status, "CCB" },
220 { report_jump_status, "Jump" }, 220 { report_jump_status, "Jump" },
221 { report_deco_status, "DECO" }, 221 { report_deco_status, "DECO" },
222 { NULL, NULL }, 222 { NULL, "Queue Manager Interface" },
223 { report_jr_status, "Job Ring" }, 223 { report_jr_status, "Job Ring" },
224 { report_cond_code_status, "Condition Code" }, 224 { report_cond_code_status, "Condition Code" },
225 { NULL, NULL },
226 { NULL, NULL },
227 { NULL, NULL },
228 { NULL, NULL },
229 { NULL, NULL },
230 { NULL, NULL },
231 { NULL, NULL },
232 { NULL, NULL },
225 }; 233 };
226 u32 ssrc = status >> JRSTA_SSRC_SHIFT; 234 u32 ssrc = status >> JRSTA_SSRC_SHIFT;
227 const char *error = status_src[ssrc].error; 235 const char *error = status_src[ssrc].error;
228 236
229 /* 237 /*
230 * If there is no further error handling function, just 238 * If there is an error handling function, call it to report the error.
231 * print the error code, error string and exit. Otherwise 239 * Otherwise print the error source name.
232 * call the handler function.
233 */ 240 */
234 if (!status_src[ssrc].report_ssed) 241 if (status_src[ssrc].report_ssed)
235 dev_err(jrdev, "%08x: %s: \n", status, status_src[ssrc].error);
236 else
237 status_src[ssrc].report_ssed(jrdev, status, error); 242 status_src[ssrc].report_ssed(jrdev, status, error);
243 else if (error)
244 dev_err(jrdev, "%d: %s\n", ssrc, error);
245 else
246 dev_err(jrdev, "%d: unknown error source\n", ssrc);
238} 247}
239EXPORT_SYMBOL(caam_jr_strstatus); 248EXPORT_SYMBOL(caam_jr_strstatus);
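
The reworked caam_jr_strstatus() indexes the 4-bit status source straight into a 16-entry table and then takes one of three paths: call the handler, print the named source, or report an unknown source. A user-space sketch of that dispatch (the shift value and the handler body are illustrative):

#include <stdio.h>
#include <stdint.h>

#define JRSTA_SSRC_SHIFT 28	/* assumed: source field in the top nibble */

typedef void (*report_fn)(uint32_t status, const char *error);

static void report_ccb_status(uint32_t status, const char *error)
{
	printf("%s error, status 0x%08x\n", error, status);
}

struct status_source {
	report_fn report_ssed;
	const char *error;
};

static void strstatus(uint32_t status)
{
	static const struct status_source status_src[16] = {
		[0] = { NULL, "No error" },
		[2] = { report_ccb_status, "CCB" },
		[5] = { NULL, "Queue Manager Interface" },
		/* remaining slots stay { NULL, NULL } */
	};
	uint32_t ssrc = status >> JRSTA_SSRC_SHIFT;

	if (status_src[ssrc].report_ssed)
		status_src[ssrc].report_ssed(status, status_src[ssrc].error);
	else if (status_src[ssrc].error)
		printf("%u: %s\n", ssrc, status_src[ssrc].error);
	else
		printf("%u: unknown error source\n", ssrc);
}

int main(void)
{
	strstatus(0x20000042);	/* ssrc = 2 -> CCB handler */
	strstatus(0x50000000);	/* ssrc = 5 -> named source, no handler */
	strstatus(0x90000000);	/* ssrc = 9 -> unknown */
	return 0;
}
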
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 4d18e27ffa9e..9207c907a128 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -181,8 +181,6 @@ static void caam_jr_dequeue(unsigned long devarg)
181 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { 181 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
182 sw_idx = (tail + i) & (JOBR_DEPTH - 1); 182 sw_idx = (tail + i) & (JOBR_DEPTH - 1);
183 183
184 smp_read_barrier_depends();
185
186 if (jrp->outring[hw_idx].desc == 184 if (jrp->outring[hw_idx].desc ==
187 jrp->entinfo[sw_idx].desc_addr_dma) 185 jrp->entinfo[sw_idx].desc_addr_dma)
188 break; /* found */ 186 break; /* found */
@@ -218,7 +216,6 @@ static void caam_jr_dequeue(unsigned long devarg)
218 if (sw_idx == tail) { 216 if (sw_idx == tail) {
219 do { 217 do {
220 tail = (tail + 1) & (JOBR_DEPTH - 1); 218 tail = (tail + 1) & (JOBR_DEPTH - 1);
221 smp_read_barrier_depends();
222 } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && 219 } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
223 jrp->entinfo[tail].desc_addr_dma == 0); 220 jrp->entinfo[tail].desc_addr_dma == 0);
224 221
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index cc00b52306ba..a066cc3450ae 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -72,27 +72,19 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
73 unsigned long irq_flags; 73 unsigned long irq_flags;
74 unsigned int processed = 0, to_process; 74 unsigned int processed = 0, to_process;
75 u32 max_sg_len;
76 int rc; 75 int rc;
77 76
78 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 77 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
79 78
80 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
81 nx_ctx->ap->sglen);
82
83 if (enc) 79 if (enc)
84 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 80 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
85 else 81 else
86 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; 82 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
87 83
88 do { 84 do {
89 to_process = min_t(u64, nbytes - processed, 85 to_process = nbytes - processed;
90 nx_ctx->ap->databytelen);
91 to_process = min_t(u64, to_process,
92 NX_PAGE_SIZE * (max_sg_len - 1));
93 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
94 86
95 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, 87 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
96 processed, csbcpb->cpb.aes_cbc.iv); 88 processed, csbcpb->cpb.aes_cbc.iv);
97 if (rc) 89 if (rc)
98 goto out; 90 goto out;
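
The NX changes in this and the following hunks (cbc, ccm, ctr, ecb, gcm) share one pattern: instead of each caller clamping to_process against databytelen and the per-page sg limit, the whole remainder is offered and the S/G builder reports back, through the pointer, how much actually fit. A sketch of that calling convention, with the limit and builder as stand-ins inferred from the interface change:

#include <stdio.h>

#define FAKE_HW_LIMIT 4096u	/* assumed per-operation ceiling */

static int fake_build_sg_lists(unsigned int *to_process)
{
	if (*to_process > FAKE_HW_LIMIT)
		*to_process = FAKE_HW_LIMIT;	/* builder clamps, caller obeys */
	return 0;
}

int main(void)
{
	unsigned int nbytes = 10000, processed = 0, to_process;

	do {
		to_process = nbytes - processed;	/* offer the whole tail */
		if (fake_build_sg_lists(&to_process))
			return 1;
		printf("chunk of %u bytes at offset %u\n", to_process, processed);
		processed += to_process;
	} while (processed < nbytes);

	return 0;
}
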
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5ecd4c2414aa..67f80813a06f 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -181,6 +181,7 @@ static int generate_pat(u8 *iv,
181 unsigned int iauth_len = 0; 181 unsigned int iauth_len = 0;
182 u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; 182 u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
183 int rc; 183 int rc;
184 unsigned int max_sg_len;
184 185
185 /* zero the ctr value */ 186 /* zero the ctr value */
186 memset(iv + 15 - iv[0], 0, iv[0] + 1); 187 memset(iv + 15 - iv[0], 0, iv[0] + 1);
@@ -248,10 +249,19 @@ static int generate_pat(u8 *iv,
248 if (!req->assoclen) { 249 if (!req->assoclen) {
249 return rc; 250 return rc;
250 } else if (req->assoclen <= 14) { 251 } else if (req->assoclen <= 14) {
251 nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen); 252 unsigned int len = 16;
252 nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16, 253
254 nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
255
256 if (len != 16)
257 return -EINVAL;
258
259 nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
253 nx_ctx->ap->sglen); 260 nx_ctx->ap->sglen);
254 261
262 if (len != 16)
263 return -EINVAL;
264
255 /* inlen should be negative, indicating to phyp that its a 265 /* inlen should be negative, indicating to phyp that its a
256 * pointer to an sg list */ 266 * pointer to an sg list */
257 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * 267 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
@@ -273,21 +283,24 @@ static int generate_pat(u8 *iv,
273 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); 283 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
274 284
275 } else { 285 } else {
276 u32 max_sg_len;
277 unsigned int processed = 0, to_process; 286 unsigned int processed = 0, to_process;
278 287
279 /* page_limit: number of sg entries that fit on one page */
280 max_sg_len = min_t(u32,
281 nx_driver.of.max_sg_len/sizeof(struct nx_sg),
282 nx_ctx->ap->sglen);
283
284 processed += iauth_len; 288 processed += iauth_len;
285 289
290 /* page_limit: number of sg entries that fit on one page */
291 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
292 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
293 max_sg_len = min_t(u64, max_sg_len,
294 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
295
286 do { 296 do {
287 to_process = min_t(u32, req->assoclen - processed, 297 to_process = min_t(u32, req->assoclen - processed,
288 nx_ctx->ap->databytelen); 298 nx_ctx->ap->databytelen);
289 to_process = min_t(u64, to_process, 299
290 NX_PAGE_SIZE * (max_sg_len - 1)); 300 nx_insg = nx_walk_and_build(nx_ctx->in_sg,
301 nx_ctx->ap->sglen,
302 req->assoc, processed,
303 &to_process);
291 304
292 if ((to_process + processed) < req->assoclen) { 305 if ((to_process + processed) < req->assoclen) {
293 NX_CPB_FDM(nx_ctx->csbcpb_aead) |= 306 NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
@@ -297,10 +310,6 @@ static int generate_pat(u8 *iv,
297 ~NX_FDM_INTERMEDIATE; 310 ~NX_FDM_INTERMEDIATE;
298 } 311 }
299 312
300 nx_insg = nx_walk_and_build(nx_ctx->in_sg,
301 nx_ctx->ap->sglen,
302 req->assoc, processed,
303 to_process);
304 313
305 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * 314 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
306 sizeof(struct nx_sg); 315 sizeof(struct nx_sg);
@@ -343,7 +352,6 @@ static int ccm_nx_decrypt(struct aead_request *req,
343 struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; 352 struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
344 unsigned long irq_flags; 353 unsigned long irq_flags;
345 unsigned int processed = 0, to_process; 354 unsigned int processed = 0, to_process;
346 u32 max_sg_len;
347 int rc = -1; 355 int rc = -1;
348 356
349 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 357 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -360,19 +368,12 @@ static int ccm_nx_decrypt(struct aead_request *req,
360 if (rc) 368 if (rc)
361 goto out; 369 goto out;
362 370
363 /* page_limit: number of sg entries that fit on one page */
364 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
365 nx_ctx->ap->sglen);
366
367 do { 371 do {
368 372
369 /* to_process: the AES_BLOCK_SIZE data chunk to process in this 373 /* to_process: the AES_BLOCK_SIZE data chunk to process in this
370 * update. This value is bound by sg list limits. 374 * update. This value is bound by sg list limits.
371 */ 375 */
372 to_process = min_t(u64, nbytes - processed, 376 to_process = nbytes - processed;
373 nx_ctx->ap->databytelen);
374 to_process = min_t(u64, to_process,
375 NX_PAGE_SIZE * (max_sg_len - 1));
376 377
377 if ((to_process + processed) < nbytes) 378 if ((to_process + processed) < nbytes)
378 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 379 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -382,7 +383,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
382 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; 383 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
383 384
384 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, 385 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
385 to_process, processed, 386 &to_process, processed,
386 csbcpb->cpb.aes_ccm.iv_or_ctr); 387 csbcpb->cpb.aes_ccm.iv_or_ctr);
387 if (rc) 388 if (rc)
388 goto out; 389 goto out;
@@ -427,7 +428,6 @@ static int ccm_nx_encrypt(struct aead_request *req,
427 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 428 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
428 unsigned long irq_flags; 429 unsigned long irq_flags;
429 unsigned int processed = 0, to_process; 430 unsigned int processed = 0, to_process;
430 u32 max_sg_len;
431 int rc = -1; 431 int rc = -1;
432 432
433 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 433 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -437,18 +437,11 @@ static int ccm_nx_encrypt(struct aead_request *req,
437 if (rc) 437 if (rc)
438 goto out; 438 goto out;
439 439
440 /* page_limit: number of sg entries that fit on one page */
441 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
442 nx_ctx->ap->sglen);
443
444 do { 440 do {
445 /* to process: the AES_BLOCK_SIZE data chunk to process in this 441 /* to process: the AES_BLOCK_SIZE data chunk to process in this
446 * update. This value is bound by sg list limits. 442 * update. This value is bound by sg list limits.
447 */ 443 */
448 to_process = min_t(u64, nbytes - processed, 444 to_process = nbytes - processed;
449 nx_ctx->ap->databytelen);
450 to_process = min_t(u64, to_process,
451 NX_PAGE_SIZE * (max_sg_len - 1));
452 445
453 if ((to_process + processed) < nbytes) 446 if ((to_process + processed) < nbytes)
454 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 447 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -458,7 +451,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
458 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 451 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
459 452
460 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, 453 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
461 to_process, processed, 454 &to_process, processed,
462 csbcpb->cpb.aes_ccm.iv_or_ctr); 455 csbcpb->cpb.aes_ccm.iv_or_ctr);
463 if (rc) 456 if (rc)
464 goto out; 457 goto out;
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index a37d009dc75c..2617cd4d54dd 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -90,22 +90,14 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
90 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 90 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
91 unsigned long irq_flags; 91 unsigned long irq_flags;
92 unsigned int processed = 0, to_process; 92 unsigned int processed = 0, to_process;
93 u32 max_sg_len;
94 int rc; 93 int rc;
95 94
96 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 95 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
97 96
98 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
99 nx_ctx->ap->sglen);
100
101 do { 97 do {
102 to_process = min_t(u64, nbytes - processed, 98 to_process = nbytes - processed;
103 nx_ctx->ap->databytelen);
104 to_process = min_t(u64, to_process,
105 NX_PAGE_SIZE * (max_sg_len - 1));
106 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
107 99
108 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, 100 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
109 processed, csbcpb->cpb.aes_ctr.iv); 101 processed, csbcpb->cpb.aes_ctr.iv);
110 if (rc) 102 if (rc)
111 goto out; 103 goto out;
@@ -143,6 +135,7 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
143 135
144 memcpy(iv + CTR_RFC3686_NONCE_SIZE, 136 memcpy(iv + CTR_RFC3686_NONCE_SIZE,
145 desc->info, CTR_RFC3686_IV_SIZE); 137 desc->info, CTR_RFC3686_IV_SIZE);
138 iv[12] = iv[13] = iv[14] = 0;
146 iv[15] = 1; 139 iv[15] = 1;
147 140
148 desc->info = nx_ctx->priv.ctr.iv; 141 desc->info = nx_ctx->priv.ctr.iv;
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 85a8d23cf29d..cfdde8b8bc76 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -72,27 +72,19 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
73 unsigned long irq_flags; 73 unsigned long irq_flags;
74 unsigned int processed = 0, to_process; 74 unsigned int processed = 0, to_process;
75 u32 max_sg_len;
76 int rc; 75 int rc;
77 76
78 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 77 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
79 78
80 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
81 nx_ctx->ap->sglen);
82
83 if (enc) 79 if (enc)
84 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 80 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
85 else 81 else
86 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; 82 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
87 83
88 do { 84 do {
89 to_process = min_t(u64, nbytes - processed, 85 to_process = nbytes - processed;
90 nx_ctx->ap->databytelen);
91 to_process = min_t(u64, to_process,
92 NX_PAGE_SIZE * (max_sg_len - 1));
93 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
94 86
95 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, 87 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
96 processed, NULL); 88 processed, NULL);
97 if (rc) 89 if (rc)
98 goto out; 90 goto out;
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 025d9a8d5b19..88c562434bc0 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -131,7 +131,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
131 struct nx_sg *nx_sg = nx_ctx->in_sg; 131 struct nx_sg *nx_sg = nx_ctx->in_sg;
132 unsigned int nbytes = req->assoclen; 132 unsigned int nbytes = req->assoclen;
133 unsigned int processed = 0, to_process; 133 unsigned int processed = 0, to_process;
134 u32 max_sg_len; 134 unsigned int max_sg_len;
135 135
136 if (nbytes <= AES_BLOCK_SIZE) { 136 if (nbytes <= AES_BLOCK_SIZE) {
137 scatterwalk_start(&walk, req->assoc); 137 scatterwalk_start(&walk, req->assoc);
@@ -143,8 +143,10 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
143 NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION; 143 NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
144 144
145 /* page_limit: number of sg entries that fit on one page */ 145 /* page_limit: number of sg entries that fit on one page */
146 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), 146 max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
147 nx_ctx->ap->sglen); 147 nx_ctx->ap->sglen);
148 max_sg_len = min_t(u64, max_sg_len,
149 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
148 150
149 do { 151 do {
150 /* 152 /*
@@ -156,13 +158,14 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
156 to_process = min_t(u64, to_process, 158 to_process = min_t(u64, to_process,
157 NX_PAGE_SIZE * (max_sg_len - 1)); 159 NX_PAGE_SIZE * (max_sg_len - 1));
158 160
161 nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
162 req->assoc, processed, &to_process);
163
159 if ((to_process + processed) < nbytes) 164 if ((to_process + processed) < nbytes)
160 NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; 165 NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
161 else 166 else
162 NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE; 167 NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
163 168
164 nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
165 req->assoc, processed, to_process);
166 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) 169 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
167 * sizeof(struct nx_sg); 170 * sizeof(struct nx_sg);
168 171
@@ -195,7 +198,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
195 struct nx_sg *nx_sg; 198 struct nx_sg *nx_sg;
196 unsigned int nbytes = req->assoclen; 199 unsigned int nbytes = req->assoclen;
197 unsigned int processed = 0, to_process; 200 unsigned int processed = 0, to_process;
198 u32 max_sg_len; 201 unsigned int max_sg_len;
199 202
200 /* Set GMAC mode */ 203 /* Set GMAC mode */
201 csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC; 204 csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
@@ -203,8 +206,10 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
203 NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; 206 NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
204 207
205 /* page_limit: number of sg entries that fit on one page */ 208 /* page_limit: number of sg entries that fit on one page */
206 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), 209 max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
207 nx_ctx->ap->sglen); 210 nx_ctx->ap->sglen);
211 max_sg_len = min_t(u64, max_sg_len,
212 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
208 213
209 /* Copy IV */ 214 /* Copy IV */
210 memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE); 215 memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
@@ -219,13 +224,14 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
219 to_process = min_t(u64, to_process, 224 to_process = min_t(u64, to_process,
220 NX_PAGE_SIZE * (max_sg_len - 1)); 225 NX_PAGE_SIZE * (max_sg_len - 1));
221 226
227 nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
228 req->assoc, processed, &to_process);
229
222 if ((to_process + processed) < nbytes) 230 if ((to_process + processed) < nbytes)
223 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 231 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
224 else 232 else
225 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; 233 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
226 234
227 nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
228 req->assoc, processed, to_process);
229 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg) 235 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
230 * sizeof(struct nx_sg); 236 * sizeof(struct nx_sg);
231 237
@@ -264,6 +270,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
264 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 270 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
265 char out[AES_BLOCK_SIZE]; 271 char out[AES_BLOCK_SIZE];
266 struct nx_sg *in_sg, *out_sg; 272 struct nx_sg *in_sg, *out_sg;
273 int len;
267 274
268 /* For scenarios where the input message is zero length, AES CTR mode 275 /* For scenarios where the input message is zero length, AES CTR mode
269 * may be used. Set the source data to be a single block (16B) of all 276 * may be used. Set the source data to be a single block (16B) of all
@@ -279,11 +286,22 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
279 else 286 else
280 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; 287 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
281 288
289 len = AES_BLOCK_SIZE;
290
282 /* Encrypt the counter/IV */ 291 /* Encrypt the counter/IV */
283 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info, 292 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
284 AES_BLOCK_SIZE, nx_ctx->ap->sglen); 293 &len, nx_ctx->ap->sglen);
285 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out), 294
295 if (len != AES_BLOCK_SIZE)
296 return -EINVAL;
297
298 len = sizeof(out);
299 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
286 nx_ctx->ap->sglen); 300 nx_ctx->ap->sglen);
301
302 if (len != sizeof(out))
303 return -EINVAL;
304
287 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 305 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
288 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 306 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
289 307
@@ -317,7 +335,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
317 unsigned int nbytes = req->cryptlen; 335 unsigned int nbytes = req->cryptlen;
318 unsigned int processed = 0, to_process; 336 unsigned int processed = 0, to_process;
319 unsigned long irq_flags; 337 unsigned long irq_flags;
320 u32 max_sg_len;
321 int rc = -EINVAL; 338 int rc = -EINVAL;
322 339
323 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 340 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -354,33 +371,24 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
354 nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); 371 nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
355 } 372 }
356 373
357 /* page_limit: number of sg entries that fit on one page */
358 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
359 nx_ctx->ap->sglen);
360
361 do { 374 do {
362 /* 375 to_process = nbytes - processed;
363 * to_process: the data chunk to process in this update.
364 * This value is bound by sg list limits.
365 */
366 to_process = min_t(u64, nbytes - processed,
367 nx_ctx->ap->databytelen);
368 to_process = min_t(u64, to_process,
369 NX_PAGE_SIZE * (max_sg_len - 1));
370
371 if ((to_process + processed) < nbytes)
372 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
373 else
374 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
375 376
376 csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; 377 csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
377 desc.tfm = (struct crypto_blkcipher *) req->base.tfm; 378 desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
378 rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, 379 rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
379 req->src, to_process, processed, 380 req->src, &to_process, processed,
380 csbcpb->cpb.aes_gcm.iv_or_cnt); 381 csbcpb->cpb.aes_gcm.iv_or_cnt);
382
381 if (rc) 383 if (rc)
382 goto out; 384 goto out;
383 385
386 if ((to_process + processed) < nbytes)
387 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
388 else
389 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
390
391
384 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 392 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
385 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); 393 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
386 if (rc) 394 if (rc)
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 03c4bf57d066..8c2faffab4a3 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -75,6 +75,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
75 u8 keys[2][AES_BLOCK_SIZE]; 75 u8 keys[2][AES_BLOCK_SIZE];
76 u8 key[32]; 76 u8 key[32];
77 int rc = 0; 77 int rc = 0;
78 int len;
78 79
79 /* Change to ECB mode */ 80 /* Change to ECB mode */
80 csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; 81 csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
@@ -86,11 +87,20 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
86 memset(keys[0], 0x01, sizeof(keys[0])); 87 memset(keys[0], 0x01, sizeof(keys[0]));
87 memset(keys[1], 0x03, sizeof(keys[1])); 88 memset(keys[1], 0x03, sizeof(keys[1]));
88 89
90 len = sizeof(keys);
89 /* Generate K1 and K3 encrypting the patterns */ 91 /* Generate K1 and K3 encrypting the patterns */
90 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys), 92 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
91 nx_ctx->ap->sglen); 93 nx_ctx->ap->sglen);
92 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys), 94
95 if (len != sizeof(keys))
96 return -EINVAL;
97
98 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
93 nx_ctx->ap->sglen); 99 nx_ctx->ap->sglen);
100
101 if (len != sizeof(keys))
102 return -EINVAL;
103
94 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 104 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
95 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 105 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
96 106
@@ -103,12 +113,23 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
103 /* XOr K3 with the padding for a 0 length message */ 113 /* XOr K3 with the padding for a 0 length message */
104 keys[1][0] ^= 0x80; 114 keys[1][0] ^= 0x80;
105 115
116 len = sizeof(keys[1]);
117
106 /* Encrypt the final result */ 118 /* Encrypt the final result */
107 memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE); 119 memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
108 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]), 120 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
109 nx_ctx->ap->sglen); 121 nx_ctx->ap->sglen);
110 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE, 122
123 if (len != sizeof(keys[1]))
124 return -EINVAL;
125
126 len = AES_BLOCK_SIZE;
127 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
111 nx_ctx->ap->sglen); 128 nx_ctx->ap->sglen);
129
130 if (len != AES_BLOCK_SIZE)
131 return -EINVAL;
132
112 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 133 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
113 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 134 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
114 135
@@ -133,6 +154,7 @@ static int nx_xcbc_init(struct shash_desc *desc)
133 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 154 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
134 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 155 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
135 struct nx_sg *out_sg; 156 struct nx_sg *out_sg;
157 int len;
136 158
137 nx_ctx_init(nx_ctx, HCOP_FC_AES); 159 nx_ctx_init(nx_ctx, HCOP_FC_AES);
138 160
@@ -144,8 +166,13 @@ static int nx_xcbc_init(struct shash_desc *desc)
144 memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); 166 memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
145 memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); 167 memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
146 168
169 len = AES_BLOCK_SIZE;
147 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, 170 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
148 AES_BLOCK_SIZE, nx_ctx->ap->sglen); 171 &len, nx_ctx->ap->sglen);
172
173 if (len != AES_BLOCK_SIZE)
174 return -EINVAL;
175
149 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 176 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
150 177
151 return 0; 178 return 0;
@@ -159,10 +186,11 @@ static int nx_xcbc_update(struct shash_desc *desc,
159 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 186 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
160 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 187 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
161 struct nx_sg *in_sg; 188 struct nx_sg *in_sg;
162 u32 to_process, leftover, total; 189 u32 to_process = 0, leftover, total;
163 u32 max_sg_len; 190 unsigned int max_sg_len;
164 unsigned long irq_flags; 191 unsigned long irq_flags;
165 int rc = 0; 192 int rc = 0;
193 int data_len;
166 194
167 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 195 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
168 196
@@ -180,17 +208,15 @@ static int nx_xcbc_update(struct shash_desc *desc,
180 } 208 }
181 209
182 in_sg = nx_ctx->in_sg; 210 in_sg = nx_ctx->in_sg;
183 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), 211 max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
184 nx_ctx->ap->sglen); 212 nx_ctx->ap->sglen);
213 max_sg_len = min_t(u64, max_sg_len,
214 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
185 215
186 do { 216 do {
187 217 to_process = total - to_process;
188 /* to_process: the AES_BLOCK_SIZE data chunk to process in this
189 * update */
190 to_process = min_t(u64, total, nx_ctx->ap->databytelen);
191 to_process = min_t(u64, to_process,
192 NX_PAGE_SIZE * (max_sg_len - 1));
193 to_process = to_process & ~(AES_BLOCK_SIZE - 1); 218 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
219
194 leftover = total - to_process; 220 leftover = total - to_process;
195 221
196 /* the hardware will not accept a 0 byte operation for this 222 /* the hardware will not accept a 0 byte operation for this
@@ -204,15 +230,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
204 } 230 }
205 231
206 if (sctx->count) { 232 if (sctx->count) {
233 data_len = sctx->count;
207 in_sg = nx_build_sg_list(nx_ctx->in_sg, 234 in_sg = nx_build_sg_list(nx_ctx->in_sg,
208 (u8 *) sctx->buffer, 235 (u8 *) sctx->buffer,
209 sctx->count, 236 &data_len,
210 max_sg_len); 237 max_sg_len);
238 if (data_len != sctx->count)
239 return -EINVAL;
211 } 240 }
241
242 data_len = to_process - sctx->count;
212 in_sg = nx_build_sg_list(in_sg, 243 in_sg = nx_build_sg_list(in_sg,
213 (u8 *) data, 244 (u8 *) data,
214 to_process - sctx->count, 245 &data_len,
215 max_sg_len); 246 max_sg_len);
247
248 if (data_len != to_process - sctx->count)
249 return -EINVAL;
250
216 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * 251 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
217 sizeof(struct nx_sg); 252 sizeof(struct nx_sg);
218 253
@@ -263,6 +298,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
263 struct nx_sg *in_sg, *out_sg; 298 struct nx_sg *in_sg, *out_sg;
264 unsigned long irq_flags; 299 unsigned long irq_flags;
265 int rc = 0; 300 int rc = 0;
301 int len;
266 302
267 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 303 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
268 304
@@ -285,11 +321,20 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
285 * this is not an intermediate operation */ 321 * this is not an intermediate operation */
286 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; 322 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
287 323
324 len = sctx->count;
288 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, 325 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
289 sctx->count, nx_ctx->ap->sglen); 326 &len, nx_ctx->ap->sglen);
290 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE, 327
328 if (len != sctx->count)
329 return -EINVAL;
330
331 len = AES_BLOCK_SIZE;
332 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
291 nx_ctx->ap->sglen); 333 nx_ctx->ap->sglen);
292 334
335 if (len != AES_BLOCK_SIZE)
336 return -EINVAL;
337
293 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 338 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
294 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 339 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
295 340
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index da0b24a7633f..23621da624c3 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -23,6 +23,7 @@
23#include <crypto/sha.h> 23#include <crypto/sha.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <asm/vio.h> 25#include <asm/vio.h>
26#include <asm/byteorder.h>
26 27
27#include "nx_csbcpb.h" 28#include "nx_csbcpb.h"
28#include "nx.h" 29#include "nx.h"
@@ -32,7 +33,8 @@ static int nx_sha256_init(struct shash_desc *desc)
32{ 33{
33 struct sha256_state *sctx = shash_desc_ctx(desc); 34 struct sha256_state *sctx = shash_desc_ctx(desc);
34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 35 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
35 struct nx_sg *out_sg; 36 int len;
37 int rc;
36 38
37 nx_ctx_init(nx_ctx, HCOP_FC_SHA); 39 nx_ctx_init(nx_ctx, HCOP_FC_SHA);
38 40
@@ -41,10 +43,28 @@ static int nx_sha256_init(struct shash_desc *desc)
41 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; 43 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
42 44
43 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); 45 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
44 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
45 SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
46 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
47 46
47 len = SHA256_DIGEST_SIZE;
48 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
49 &nx_ctx->op.outlen,
50 &len,
51 (u8 *) sctx->state,
52 NX_DS_SHA256);
53
54 if (rc)
55 goto out;
56
57 sctx->state[0] = __cpu_to_be32(SHA256_H0);
58 sctx->state[1] = __cpu_to_be32(SHA256_H1);
59 sctx->state[2] = __cpu_to_be32(SHA256_H2);
60 sctx->state[3] = __cpu_to_be32(SHA256_H3);
61 sctx->state[4] = __cpu_to_be32(SHA256_H4);
62 sctx->state[5] = __cpu_to_be32(SHA256_H5);
63 sctx->state[6] = __cpu_to_be32(SHA256_H6);
64 sctx->state[7] = __cpu_to_be32(SHA256_H7);
65 sctx->count = 0;
66
67out:
48 return 0; 68 return 0;
49} 69}
50 70
@@ -54,11 +74,11 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
54 struct sha256_state *sctx = shash_desc_ctx(desc); 74 struct sha256_state *sctx = shash_desc_ctx(desc);
55 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 75 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
56 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 76 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
57 struct nx_sg *in_sg; 77 u64 to_process = 0, leftover, total;
58 u64 to_process, leftover, total;
59 u32 max_sg_len;
60 unsigned long irq_flags; 78 unsigned long irq_flags;
61 int rc = 0; 79 int rc = 0;
80 int data_len;
81 u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
62 82
63 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 83 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
64 84
@@ -66,16 +86,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
66 * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 86 * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
67 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover 87 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
68 */ 88 */
69 total = sctx->count + len; 89 total = (sctx->count % SHA256_BLOCK_SIZE) + len;
70 if (total < SHA256_BLOCK_SIZE) { 90 if (total < SHA256_BLOCK_SIZE) {
71 memcpy(sctx->buf + sctx->count, data, len); 91 memcpy(sctx->buf + buf_len, data, len);
72 sctx->count += len; 92 sctx->count += len;
73 goto out; 93 goto out;
74 } 94 }
75 95
76 in_sg = nx_ctx->in_sg; 96 memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
77 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), 97 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
78 nx_ctx->ap->sglen); 98 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
79 99
80 do { 100 do {
81 /* 101 /*
@@ -83,34 +103,42 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
83 * this update. This value is also restricted by the sg list 103 * this update. This value is also restricted by the sg list
84 * limits. 104 * limits.
85 */ 105 */
86 to_process = min_t(u64, total, nx_ctx->ap->databytelen); 106 to_process = total - to_process;
87 to_process = min_t(u64, to_process,
88 NX_PAGE_SIZE * (max_sg_len - 1));
89 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1); 107 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
90 leftover = total - to_process;
91 108
92 if (sctx->count) { 109 if (buf_len) {
93 in_sg = nx_build_sg_list(nx_ctx->in_sg, 110 data_len = buf_len;
94 (u8 *) sctx->buf, 111 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
95 sctx->count, max_sg_len); 112 &nx_ctx->op.inlen,
113 &data_len,
114 (u8 *) sctx->buf,
115 NX_DS_SHA256);
116
117 if (rc || data_len != buf_len)
118 goto out;
96 } 119 }
97 in_sg = nx_build_sg_list(in_sg, (u8 *) data, 120
98 to_process - sctx->count, 121 data_len = to_process - buf_len;
99 max_sg_len); 122 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
100 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * 123 &nx_ctx->op.inlen,
101 sizeof(struct nx_sg); 124 &data_len,
102 125 (u8 *) data,
103 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 126 NX_DS_SHA256);
104 /* 127
105 * we've hit the nx chip previously and we're updating 128 if (rc)
106 * again, so copy over the partial digest. 129 goto out;
107 */ 130
108 memcpy(csbcpb->cpb.sha256.input_partial_digest, 131 to_process = (data_len + buf_len);
132 leftover = total - to_process;
133
134 /*
135 * we've hit the nx chip previously and we're updating
136 * again, so copy over the partial digest.
137 */
138 memcpy(csbcpb->cpb.sha256.input_partial_digest,
109 csbcpb->cpb.sha256.message_digest, 139 csbcpb->cpb.sha256.message_digest,
110 SHA256_DIGEST_SIZE); 140 SHA256_DIGEST_SIZE);
111 }
112 141
113 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
114 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { 142 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
115 rc = -EINVAL; 143 rc = -EINVAL;
116 goto out; 144 goto out;
@@ -122,22 +150,19 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
122 goto out; 150 goto out;
123 151
124 atomic_inc(&(nx_ctx->stats->sha256_ops)); 152 atomic_inc(&(nx_ctx->stats->sha256_ops));
125 csbcpb->cpb.sha256.message_bit_length += (u64)
126 (csbcpb->cpb.sha256.spbc * 8);
127
128 /* everything after the first update is continuation */
129 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
130 153
131 total -= to_process; 154 total -= to_process;
132 data += to_process - sctx->count; 155 data += to_process - buf_len;
133 sctx->count = 0; 156 buf_len = 0;
134 in_sg = nx_ctx->in_sg; 157
135 } while (leftover >= SHA256_BLOCK_SIZE); 158 } while (leftover >= SHA256_BLOCK_SIZE);
136 159
137 /* copy the leftover back into the state struct */ 160 /* copy the leftover back into the state struct */
138 if (leftover) 161 if (leftover)
139 memcpy(sctx->buf, data, leftover); 162 memcpy(sctx->buf, data, leftover);
140 sctx->count = leftover; 163
164 sctx->count += len;
165 memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
141out: 166out:
142 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 167 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
143 return rc; 168 return rc;
@@ -148,34 +173,46 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
148 struct sha256_state *sctx = shash_desc_ctx(desc); 173 struct sha256_state *sctx = shash_desc_ctx(desc);
149 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 174 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
150 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 175 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
151 struct nx_sg *in_sg, *out_sg;
152 u32 max_sg_len;
153 unsigned long irq_flags; 176 unsigned long irq_flags;
154 int rc; 177 int rc;
178 int len;
155 179
156 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 180 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
157 181
158 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); 182 /* final is represented by continuing the operation and indicating that
159 183 * this is not an intermediate operation */
160 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 184 if (sctx->count >= SHA256_BLOCK_SIZE) {
161 /* we've hit the nx chip previously, now we're finalizing, 185 /* we've hit the nx chip previously, now we're finalizing,
162 * so copy over the partial digest */ 186 * so copy over the partial digest */
163 memcpy(csbcpb->cpb.sha256.input_partial_digest, 187 memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
164 csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); 188 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
189 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
190 } else {
191 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
192 NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
165 } 193 }
166 194
167 /* final is represented by continuing the operation and indicating that 195 csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
168 * this is not an intermediate operation */
169 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
170 196
171 csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8); 197 len = sctx->count & (SHA256_BLOCK_SIZE - 1);
198 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
199 &nx_ctx->op.inlen,
200 &len,
201 (u8 *) sctx->buf,
202 NX_DS_SHA256);
172 203
173 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, 204 if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
174 sctx->count, max_sg_len); 205 goto out;
175 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE, 206
176 max_sg_len); 207 len = SHA256_DIGEST_SIZE;
177 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 208 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
178 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 209 &nx_ctx->op.outlen,
210 &len,
211 out,
212 NX_DS_SHA256);
213
214 if (rc || len != SHA256_DIGEST_SIZE)
215 goto out;
179 216
180 if (!nx_ctx->op.outlen) { 217 if (!nx_ctx->op.outlen) {
181 rc = -EINVAL; 218 rc = -EINVAL;
@@ -189,8 +226,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
189 226
190 atomic_inc(&(nx_ctx->stats->sha256_ops)); 227 atomic_inc(&(nx_ctx->stats->sha256_ops));
191 228
192 atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8, 229 atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
193 &(nx_ctx->stats->sha256_bytes));
194 memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); 230 memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
195out: 231out:
196 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 232 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
@@ -200,62 +236,18 @@ out:
200static int nx_sha256_export(struct shash_desc *desc, void *out) 236static int nx_sha256_export(struct shash_desc *desc, void *out)
201{ 237{
202 struct sha256_state *sctx = shash_desc_ctx(desc); 238 struct sha256_state *sctx = shash_desc_ctx(desc);
203 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
204 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
205 struct sha256_state *octx = out;
206 unsigned long irq_flags;
207
208 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
209 239
210 octx->count = sctx->count + 240 memcpy(out, sctx, sizeof(*sctx));
211 (csbcpb->cpb.sha256.message_bit_length / 8);
212 memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
213
214 /* if no data has been processed yet, we need to export SHA256's
215 * initial data, in case this context gets imported into a software
216 * context */
217 if (csbcpb->cpb.sha256.message_bit_length)
218 memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
219 SHA256_DIGEST_SIZE);
220 else {
221 octx->state[0] = SHA256_H0;
222 octx->state[1] = SHA256_H1;
223 octx->state[2] = SHA256_H2;
224 octx->state[3] = SHA256_H3;
225 octx->state[4] = SHA256_H4;
226 octx->state[5] = SHA256_H5;
227 octx->state[6] = SHA256_H6;
228 octx->state[7] = SHA256_H7;
229 }
230 241
231 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
232 return 0; 242 return 0;
233} 243}
234 244
235static int nx_sha256_import(struct shash_desc *desc, const void *in) 245static int nx_sha256_import(struct shash_desc *desc, const void *in)
236{ 246{
237 struct sha256_state *sctx = shash_desc_ctx(desc); 247 struct sha256_state *sctx = shash_desc_ctx(desc);
238 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
239 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
240 const struct sha256_state *ictx = in;
241 unsigned long irq_flags;
242
243 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
244 248
245 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 249 memcpy(sctx, in, sizeof(*sctx));
246 250
247 sctx->count = ictx->count & 0x3f;
248 csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;
249
250 if (csbcpb->cpb.sha256.message_bit_length) {
251 memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
252 SHA256_DIGEST_SIZE);
253
254 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
255 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
256 }
257
258 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
259 return 0; 251 return 0;
260} 252}
261 253
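
The rewritten nx_sha256_update() above keeps sctx->count as the running total of input bytes, buffers the count % SHA256_BLOCK_SIZE tail in sctx->buf, and only hands whole blocks to the coprocessor; the nx-sha512.c hunks below follow the same pattern with that algorithm's 128-byte block size. A minimal sketch of just that bookkeeping, with hypothetical names, no hashing, and a 64-byte block:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64			/* SHA256_BLOCK_SIZE */

struct state {
	unsigned long long count;	/* total bytes seen, like sctx->count */
	unsigned char buf[BLOCK_SIZE];	/* unprocessed tail, like sctx->buf */
};

/* Model of the update path: buffer short input, process whole blocks,
 * keep the leftover for the next call. */
static void update(struct state *s, const unsigned char *data, size_t len)
{
	size_t buf_len = s->count % BLOCK_SIZE;
	size_t total = buf_len + len;

	if (total < BLOCK_SIZE) {		/* case 1: just buffer it */
		memcpy(s->buf + buf_len, data, len);
		s->count += len;
		return;
	}

	size_t to_process = total & ~(BLOCK_SIZE - 1);	/* whole blocks only */
	size_t leftover = total - to_process;

	printf("process %zu bytes (%zu buffered + %zu new), keep %zu\n",
	       to_process, buf_len, to_process - buf_len, leftover);

	if (leftover)				/* copy the tail back */
		memcpy(s->buf, data + (to_process - buf_len), leftover);
	s->count += len;
}

int main(void)
{
	struct state s = { 0 };
	unsigned char chunk[100] = { 0 };

	update(&s, chunk, 30);	/* buffered only */
	update(&s, chunk, 100);	/* 30 + 100 = 130 -> process 128, keep 2 */
	return 0;
}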
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 4ae5b0f221d5..b3adf1022673 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -32,7 +32,8 @@ static int nx_sha512_init(struct shash_desc *desc)
32{ 32{
33 struct sha512_state *sctx = shash_desc_ctx(desc); 33 struct sha512_state *sctx = shash_desc_ctx(desc);
34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
35 struct nx_sg *out_sg; 35 int len;
36 int rc;
36 37
37 nx_ctx_init(nx_ctx, HCOP_FC_SHA); 38 nx_ctx_init(nx_ctx, HCOP_FC_SHA);
38 39
@@ -41,10 +42,28 @@ static int nx_sha512_init(struct shash_desc *desc)
41 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; 42 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
42 43
43 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); 44 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
44 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
45 SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
46 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
47 45
46 len = SHA512_DIGEST_SIZE;
47 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
48 &nx_ctx->op.outlen,
49 &len,
50 (u8 *)sctx->state,
51 NX_DS_SHA512);
52
53 if (rc || len != SHA512_DIGEST_SIZE)
54 goto out;
55
56 sctx->state[0] = __cpu_to_be64(SHA512_H0);
57 sctx->state[1] = __cpu_to_be64(SHA512_H1);
58 sctx->state[2] = __cpu_to_be64(SHA512_H2);
59 sctx->state[3] = __cpu_to_be64(SHA512_H3);
60 sctx->state[4] = __cpu_to_be64(SHA512_H4);
61 sctx->state[5] = __cpu_to_be64(SHA512_H5);
62 sctx->state[6] = __cpu_to_be64(SHA512_H6);
63 sctx->state[7] = __cpu_to_be64(SHA512_H7);
64 sctx->count[0] = 0;
65
66out:
48 return 0; 67 return 0;
49} 68}
50 69
@@ -54,11 +73,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
54 struct sha512_state *sctx = shash_desc_ctx(desc); 73 struct sha512_state *sctx = shash_desc_ctx(desc);
55 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 74 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
56 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 75 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
57 struct nx_sg *in_sg; 76 u64 to_process, leftover = 0, total;
58 u64 to_process, leftover, total, spbc_bits;
59 u32 max_sg_len;
60 unsigned long irq_flags; 77 unsigned long irq_flags;
61 int rc = 0; 78 int rc = 0;
79 int data_len;
80 u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
62 81
63 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 82 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
64 83
@@ -66,16 +85,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
66 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 85 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
67 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover 86 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
68 */ 87 */
69 total = sctx->count[0] + len; 88 total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
70 if (total < SHA512_BLOCK_SIZE) { 89 if (total < SHA512_BLOCK_SIZE) {
71 memcpy(sctx->buf + sctx->count[0], data, len); 90 memcpy(sctx->buf + buf_len, data, len);
72 sctx->count[0] += len; 91 sctx->count[0] += len;
73 goto out; 92 goto out;
74 } 93 }
75 94
76 in_sg = nx_ctx->in_sg; 95 memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
77 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), 96 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
78 nx_ctx->ap->sglen); 97 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
79 98
80 do { 99 do {
81 /* 100 /*
@@ -83,34 +102,43 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
83 * this update. This value is also restricted by the sg list 102 * this update. This value is also restricted by the sg list
84 * limits. 103 * limits.
85 */ 104 */
86 to_process = min_t(u64, total, nx_ctx->ap->databytelen); 105 to_process = total - leftover;
87 to_process = min_t(u64, to_process,
88 NX_PAGE_SIZE * (max_sg_len - 1));
89 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); 106 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
90 leftover = total - to_process; 107 leftover = total - to_process;
91 108
92 if (sctx->count[0]) { 109 if (buf_len) {
93 in_sg = nx_build_sg_list(nx_ctx->in_sg, 110 data_len = buf_len;
94 (u8 *) sctx->buf, 111 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
95 sctx->count[0], max_sg_len); 112 &nx_ctx->op.inlen,
113 &data_len,
114 (u8 *) sctx->buf,
115 NX_DS_SHA512);
116
117 if (rc || data_len != buf_len)
118 goto out;
96 } 119 }
97 in_sg = nx_build_sg_list(in_sg, (u8 *) data, 120
98 to_process - sctx->count[0], 121 data_len = to_process - buf_len;
99 max_sg_len); 122 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
100 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * 123 &nx_ctx->op.inlen,
101 sizeof(struct nx_sg); 124 &data_len,
102 125 (u8 *) data,
103 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 126 NX_DS_SHA512);
104 /* 127
105 * we've hit the nx chip previously and we're updating 128 if (rc || data_len != (to_process - buf_len))
106 * again, so copy over the partial digest. 129 goto out;
107 */ 130
108 memcpy(csbcpb->cpb.sha512.input_partial_digest, 131 to_process = (data_len + buf_len);
132 leftover = total - to_process;
133
134 /*
135 * we've hit the nx chip previously and we're updating
136 * again, so copy over the partial digest.
137 */
138 memcpy(csbcpb->cpb.sha512.input_partial_digest,
109 csbcpb->cpb.sha512.message_digest, 139 csbcpb->cpb.sha512.message_digest,
110 SHA512_DIGEST_SIZE); 140 SHA512_DIGEST_SIZE);
111 }
112 141
113 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
114 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { 142 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
115 rc = -EINVAL; 143 rc = -EINVAL;
116 goto out; 144 goto out;
@@ -122,24 +150,18 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
122 goto out; 150 goto out;
123 151
124 atomic_inc(&(nx_ctx->stats->sha512_ops)); 152 atomic_inc(&(nx_ctx->stats->sha512_ops));
125 spbc_bits = csbcpb->cpb.sha512.spbc * 8;
126 csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
127 if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
128 csbcpb->cpb.sha512.message_bit_length_hi++;
129
130 /* everything after the first update is continuation */
131 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
132 153
133 total -= to_process; 154 total -= to_process;
134 data += to_process - sctx->count[0]; 155 data += to_process - buf_len;
135 sctx->count[0] = 0; 156 buf_len = 0;
136 in_sg = nx_ctx->in_sg; 157
137 } while (leftover >= SHA512_BLOCK_SIZE); 158 } while (leftover >= SHA512_BLOCK_SIZE);
138 159
139 /* copy the leftover back into the state struct */ 160 /* copy the leftover back into the state struct */
140 if (leftover) 161 if (leftover)
141 memcpy(sctx->buf, data, leftover); 162 memcpy(sctx->buf, data, leftover);
142 sctx->count[0] = leftover; 163 sctx->count[0] += len;
164 memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
143out: 165out:
144 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 166 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
145 return rc; 167 return rc;
@@ -150,39 +172,52 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
150 struct sha512_state *sctx = shash_desc_ctx(desc); 172 struct sha512_state *sctx = shash_desc_ctx(desc);
151 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 173 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
152 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 174 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
153 struct nx_sg *in_sg, *out_sg;
154 u32 max_sg_len;
155 u64 count0; 175 u64 count0;
156 unsigned long irq_flags; 176 unsigned long irq_flags;
157 int rc; 177 int rc;
178 int len;
158 179
159 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 180 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
160 181
161 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); 182 /* final is represented by continuing the operation and indicating that
162 183 * this is not an intermediate operation */
163 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 184 if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
164 /* we've hit the nx chip previously, now we're finalizing, 185 /* we've hit the nx chip previously, now we're finalizing,
165 * so copy over the partial digest */ 186 * so copy over the partial digest */
166 memcpy(csbcpb->cpb.sha512.input_partial_digest, 187 memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
167 csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); 188 SHA512_DIGEST_SIZE);
189 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
190 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
191 } else {
192 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
193 NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
168 } 194 }
169 195
170 /* final is represented by continuing the operation and indicating that
171 * this is not an intermediate operation */
172 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; 196 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
173 197
174 count0 = sctx->count[0] * 8; 198 count0 = sctx->count[0] * 8;
175 199
176 csbcpb->cpb.sha512.message_bit_length_lo += count0; 200 csbcpb->cpb.sha512.message_bit_length_lo = count0;
177 if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
178 csbcpb->cpb.sha512.message_bit_length_hi++;
179 201
180 in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0], 202 len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
181 max_sg_len); 203 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
182 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE, 204 &nx_ctx->op.inlen,
183 max_sg_len); 205 &len,
184 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 206 (u8 *)sctx->buf,
185 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 207 NX_DS_SHA512);
208
209 if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
210 goto out;
211
212 len = SHA512_DIGEST_SIZE;
213 rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
214 &nx_ctx->op.outlen,
215 &len,
216 out,
217 NX_DS_SHA512);
218
219 if (rc)
220 goto out;
186 221
187 if (!nx_ctx->op.outlen) { 222 if (!nx_ctx->op.outlen) {
188 rc = -EINVAL; 223 rc = -EINVAL;
@@ -195,8 +230,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
195 goto out; 230 goto out;
196 231
197 atomic_inc(&(nx_ctx->stats->sha512_ops)); 232 atomic_inc(&(nx_ctx->stats->sha512_ops));
198 atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8, 233 atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
199 &(nx_ctx->stats->sha512_bytes));
200 234
201 memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); 235 memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
202out: 236out:
@@ -207,74 +241,18 @@ out:
207static int nx_sha512_export(struct shash_desc *desc, void *out) 241static int nx_sha512_export(struct shash_desc *desc, void *out)
208{ 242{
209 struct sha512_state *sctx = shash_desc_ctx(desc); 243 struct sha512_state *sctx = shash_desc_ctx(desc);
210 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
211 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
212 struct sha512_state *octx = out;
213 unsigned long irq_flags;
214 244
215 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 245 memcpy(out, sctx, sizeof(*sctx));
216 246
217 /* move message_bit_length (128 bits) into count and convert its value
218 * to bytes */
219 octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
220 ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
221 octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
222
223 octx->count[0] += sctx->count[0];
224 if (octx->count[0] < sctx->count[0])
225 octx->count[1]++;
226
227 memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
228
229 /* if no data has been processed yet, we need to export SHA512's
230 * initial data, in case this context gets imported into a software
231 * context */
232 if (csbcpb->cpb.sha512.message_bit_length_hi ||
233 csbcpb->cpb.sha512.message_bit_length_lo)
234 memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
235 SHA512_DIGEST_SIZE);
236 else {
237 octx->state[0] = SHA512_H0;
238 octx->state[1] = SHA512_H1;
239 octx->state[2] = SHA512_H2;
240 octx->state[3] = SHA512_H3;
241 octx->state[4] = SHA512_H4;
242 octx->state[5] = SHA512_H5;
243 octx->state[6] = SHA512_H6;
244 octx->state[7] = SHA512_H7;
245 }
246
247 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
248 return 0; 247 return 0;
249} 248}
250 249
251static int nx_sha512_import(struct shash_desc *desc, const void *in) 250static int nx_sha512_import(struct shash_desc *desc, const void *in)
252{ 251{
253 struct sha512_state *sctx = shash_desc_ctx(desc); 252 struct sha512_state *sctx = shash_desc_ctx(desc);
254 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
255 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
256 const struct sha512_state *ictx = in;
257 unsigned long irq_flags;
258
259 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
260
261 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
262 sctx->count[0] = ictx->count[0] & 0x3f;
263 csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f)
264 << 3;
265 csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
266 ictx->count[0] >> 61;
267
268 if (csbcpb->cpb.sha512.message_bit_length_hi ||
269 csbcpb->cpb.sha512.message_bit_length_lo) {
270 memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
271 SHA512_DIGEST_SIZE);
272 253
273 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 254 memcpy(sctx, in, sizeof(*sctx));
274 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
275 }
276 255
277 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
278 return 0; 256 return 0;
279} 257}
280 258
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 5533fe31c90d..a392465d3e3f 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -90,7 +90,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
90 */ 90 */
91struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, 91struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
92 u8 *start_addr, 92 u8 *start_addr,
93 unsigned int len, 93 unsigned int *len,
94 u32 sgmax) 94 u32 sgmax)
95{ 95{
96 unsigned int sg_len = 0; 96 unsigned int sg_len = 0;
@@ -106,7 +106,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
106 else 106 else
107 sg_addr = __pa(sg_addr); 107 sg_addr = __pa(sg_addr);
108 108
109 end_addr = sg_addr + len; 109 end_addr = sg_addr + *len;
110 110
111 /* each iteration will write one struct nx_sg element and add the 111 /* each iteration will write one struct nx_sg element and add the
112 * length of data described by that element to sg_len. Once @len bytes 112 * length of data described by that element to sg_len. Once @len bytes
@@ -118,7 +118,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
118 * Also when using vmalloc'ed data, every time that a system page 118 * Also when using vmalloc'ed data, every time that a system page
119 * boundary is crossed the physical address needs to be re-calculated. 119 * boundary is crossed the physical address needs to be re-calculated.
120 */ 120 */
121 for (sg = sg_head; sg_len < len; sg++) { 121 for (sg = sg_head; sg_len < *len; sg++) {
122 u64 next_page; 122 u64 next_page;
123 123
124 sg->addr = sg_addr; 124 sg->addr = sg_addr;
@@ -133,15 +133,17 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
133 is_vmalloc_addr(start_addr + sg_len)) { 133 is_vmalloc_addr(start_addr + sg_len)) {
134 sg_addr = page_to_phys(vmalloc_to_page( 134 sg_addr = page_to_phys(vmalloc_to_page(
135 start_addr + sg_len)); 135 start_addr + sg_len));
136 end_addr = sg_addr + len - sg_len; 136 end_addr = sg_addr + *len - sg_len;
137 } 137 }
138 138
139 if ((sg - sg_head) == sgmax) { 139 if ((sg - sg_head) == sgmax) {
140 pr_err("nx: scatter/gather list overflow, pid: %d\n", 140 pr_err("nx: scatter/gather list overflow, pid: %d\n",
141 current->pid); 141 current->pid);
142 return NULL; 142 sg++;
143 break;
143 } 144 }
144 } 145 }
146 *len = sg_len;
145 147
146 /* return the moved sg_head pointer */ 148 /* return the moved sg_head pointer */
147 return sg; 149 return sg;
@@ -160,11 +162,11 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
160 unsigned int sglen, 162 unsigned int sglen,
161 struct scatterlist *sg_src, 163 struct scatterlist *sg_src,
162 unsigned int start, 164 unsigned int start,
163 unsigned int src_len) 165 unsigned int *src_len)
164{ 166{
165 struct scatter_walk walk; 167 struct scatter_walk walk;
166 struct nx_sg *nx_sg = nx_dst; 168 struct nx_sg *nx_sg = nx_dst;
167 unsigned int n, offset = 0, len = src_len; 169 unsigned int n, offset = 0, len = *src_len;
168 char *dst; 170 char *dst;
169 171
170 /* we need to fast forward through @start bytes first */ 172 /* we need to fast forward through @start bytes first */
@@ -182,27 +184,101 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
182 * element we're currently looking at */ 184 * element we're currently looking at */
183 scatterwalk_advance(&walk, start - offset); 185 scatterwalk_advance(&walk, start - offset);
184 186
185 while (len && nx_sg) { 187 while (len && (nx_sg - nx_dst) < sglen) {
186 n = scatterwalk_clamp(&walk, len); 188 n = scatterwalk_clamp(&walk, len);
187 if (!n) { 189 if (!n) {
188 scatterwalk_start(&walk, sg_next(walk.sg)); 190 /* In cases where we have a scatterlist chain, scatterwalk_sg_next
191 * handles it properly */
192 scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
189 n = scatterwalk_clamp(&walk, len); 193 n = scatterwalk_clamp(&walk, len);
190 } 194 }
191 dst = scatterwalk_map(&walk); 195 dst = scatterwalk_map(&walk);
192 196
193 nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen); 197 nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
194 len -= n; 198 len -= n;
195 199
196 scatterwalk_unmap(dst); 200 scatterwalk_unmap(dst);
197 scatterwalk_advance(&walk, n); 201 scatterwalk_advance(&walk, n);
198 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len); 202 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
199 } 203 }
204 /* update to_process */
205 *src_len -= len;
200 206
201 /* return the moved destination pointer */ 207 /* return the moved destination pointer */
202 return nx_sg; 208 return nx_sg;
203} 209}
204 210
205/** 211/**
212 * trim_sg_list - ensures the sg list stays within bounds.
213 * @sg: sg list head
214 * @end: sg list end
215 * @delta: the amount we need to crop in order to bound the list.
216 *
217 */
218static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
219{
220 while (delta && end > sg) {
221 struct nx_sg *last = end - 1;
222
223 if (last->len > delta) {
224 last->len -= delta;
225 delta = 0;
226 } else {
227 end--;
228 delta -= last->len;
229 }
230 }
231 return (sg - end) * sizeof(struct nx_sg);
232}
233
234/**
235 * nx_sha_build_sg_list - walk and build an sg list for the SHA modes,
236 * using the right bounds and limits.
237 * @nx_ctx: NX crypto context for the lists we're building
238 * @nx_in_outsg: current in or out sg list to build
239 * @op_len: op length to be computed from the sg list that is built
240 * @nbytes: number of bytes to be processed
241 * @offset: buf offset
242 * @mode: SHA256 or SHA512
243 */
244int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
245 struct nx_sg *nx_in_outsg,
246 s64 *op_len,
247 unsigned int *nbytes,
248 u8 *offset,
249 u32 mode)
250{
251 unsigned int delta = 0;
252 unsigned int total = *nbytes;
253 struct nx_sg *nx_insg = nx_in_outsg;
254 unsigned int max_sg_len;
255
256 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
257 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
258 max_sg_len = min_t(u64, max_sg_len,
259 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
260
261 *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
262 nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
263
264 switch (mode) {
265 case NX_DS_SHA256:
266 if (*nbytes < total)
267 delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
268 break;
269 case NX_DS_SHA512:
270 if (*nbytes < total)
271 delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
272 break;
273 default:
274 return -EINVAL;
275 }
276 *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
277
278 return 0;
279}
280
281/**
206 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX 282 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
207 * scatterlists based on them. 283 * scatterlists based on them.
208 * 284 *
@@ -223,26 +299,39 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
223 struct blkcipher_desc *desc, 299 struct blkcipher_desc *desc,
224 struct scatterlist *dst, 300 struct scatterlist *dst,
225 struct scatterlist *src, 301 struct scatterlist *src,
226 unsigned int nbytes, 302 unsigned int *nbytes,
227 unsigned int offset, 303 unsigned int offset,
228 u8 *iv) 304 u8 *iv)
229{ 305{
306 unsigned int delta = 0;
307 unsigned int total = *nbytes;
230 struct nx_sg *nx_insg = nx_ctx->in_sg; 308 struct nx_sg *nx_insg = nx_ctx->in_sg;
231 struct nx_sg *nx_outsg = nx_ctx->out_sg; 309 struct nx_sg *nx_outsg = nx_ctx->out_sg;
310 unsigned int max_sg_len;
311
312 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
313 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
314 max_sg_len = min_t(u64, max_sg_len,
315 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
232 316
233 if (iv) 317 if (iv)
234 memcpy(iv, desc->info, AES_BLOCK_SIZE); 318 memcpy(iv, desc->info, AES_BLOCK_SIZE);
235 319
236 nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 320 *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
237 offset, nbytes); 321
238 nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 322 nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
239 offset, nbytes); 323 offset, nbytes);
324 nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
325 offset, nbytes);
326
327 if (*nbytes < total)
328 delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));
240 329
241 /* these lengths should be negative, which will indicate to phyp that 330 /* these lengths should be negative, which will indicate to phyp that
242 * the input and output parameters are scatterlists, not linear 331 * the input and output parameters are scatterlists, not linear
243 * buffers */ 332 * buffers */
244 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg); 333 nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
245 nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg); 334 nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
246 335
247 return 0; 336 return 0;
248} 337}
@@ -540,10 +629,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
540 629
541 /* we need an extra page for csbcpb_aead for these modes */ 630 /* we need an extra page for csbcpb_aead for these modes */
542 if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM) 631 if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
543 nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) + 632 nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
544 sizeof(struct nx_csbcpb); 633 sizeof(struct nx_csbcpb);
545 else 634 else
546 nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) + 635 nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
547 sizeof(struct nx_csbcpb); 636 sizeof(struct nx_csbcpb);
548 637
549 nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL); 638 nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
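
trim_sg_list() above crops trailing entries from an already-built nx_sg array until delta bytes have been removed, which is how nx_build_sg_lists() and nx_sha_build_sg_list() keep only a block-aligned amount of data described. A user-space model of the same loop over a plain length array; the negative return mirrors the phyp scatterlist convention mentioned in the nx_build_sg_lists() comment, and the struct here is a simplified stand-in for struct nx_sg.

#include <stdio.h>

struct sg { unsigned int len; };

/* Model of trim_sg_list(): remove 'delta' bytes from the tail of the
 * list bounded by [sg, end), shrinking or dropping trailing entries. */
static long trim(struct sg *sg, struct sg *end, unsigned int delta)
{
	while (delta && end > sg) {
		struct sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}
	/* Negative byte length, as the driver returns (sg - end) * sizeof(struct nx_sg). */
	return (sg - end) * (long)sizeof(struct sg);
}

int main(void)
{
	struct sg list[3] = { { 4096 }, { 4096 }, { 40 } };

	/* Crop 100 bytes: drops the 40-byte entry, shaves 60 off the second. */
	long oplen = trim(list, list + 3, 100);

	printf("op len %ld (%ld entries), last len %u\n",
	       oplen, -oplen / (long)sizeof(struct sg), list[1].len);
	return 0;
}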
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index befda07ca1da..6c9ecaaead52 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -153,13 +153,15 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
153void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); 153void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
154int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, 154int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
155 u32 may_sleep); 155 u32 may_sleep);
156struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32); 156int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
157 s64 *, unsigned int *, u8 *, u32);
158struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
157int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, 159int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
158 struct scatterlist *, struct scatterlist *, unsigned int, 160 struct scatterlist *, struct scatterlist *, unsigned int *,
159 unsigned int, u8 *); 161 unsigned int, u8 *);
160struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, 162struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
161 struct scatterlist *, unsigned int, 163 struct scatterlist *, unsigned int,
162 unsigned int); 164 unsigned int *);
163 165
164#ifdef CONFIG_DEBUG_FS 166#ifdef CONFIG_DEBUG_FS
165#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv) 167#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 633ba945e153..c178ed8c3908 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
563MODULE_LICENSE("GPL"); 563MODULE_LICENSE("GPL");
564MODULE_AUTHOR("Michal Ludvig"); 564MODULE_AUTHOR("Michal Ludvig");
565 565
566MODULE_ALIAS("aes"); 566MODULE_ALIAS_CRYPTO("aes");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index bace885634f2..95f7d27ce491 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
593MODULE_LICENSE("GPL"); 593MODULE_LICENSE("GPL");
594MODULE_AUTHOR("Michal Ludvig"); 594MODULE_AUTHOR("Michal Ludvig");
595 595
596MODULE_ALIAS("sha1-all"); 596MODULE_ALIAS_CRYPTO("sha1-all");
597MODULE_ALIAS("sha256-all"); 597MODULE_ALIAS_CRYPTO("sha256-all");
598MODULE_ALIAS("sha1-padlock"); 598MODULE_ALIAS_CRYPTO("sha1-padlock");
599MODULE_ALIAS("sha256-padlock"); 599MODULE_ALIAS_CRYPTO("sha256-padlock");
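
The padlock changes here, and the qat_common change further down, switch MODULE_ALIAS() to MODULE_ALIAS_CRYPTO(). As best I recall (the macro itself is introduced elsewhere in this series and is not shown in these hunks), it records the alias twice, once bare for existing userspace and once with a "crypto-" prefix, so the crypto API can request modules as "crypto-<name>" and arbitrary module loading through bare algorithm names can be disallowed. Roughly, in include/linux/crypto.h:

/* Approximate recollection of the macro added by this series; the exact
 * tag names may differ in the final tree. */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)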
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index fe7b3f06f6e6..2ed425664a16 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -56,8 +56,6 @@
56#define PCI_VENDOR_ID_INTEL 0x8086 56#define PCI_VENDOR_ID_INTEL 0x8086
57#define ADF_DH895XCC_DEVICE_NAME "dh895xcc" 57#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
58#define ADF_DH895XCC_PCI_DEVICE_ID 0x435 58#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
59#define ADF_DH895XCC_PMISC_BAR 1
60#define ADF_DH895XCC_ETR_BAR 2
61#define ADF_PCI_MAX_BARS 3 59#define ADF_PCI_MAX_BARS 3
62#define ADF_DEVICE_NAME_LENGTH 32 60#define ADF_DEVICE_NAME_LENGTH 32
63#define ADF_ETR_MAX_RINGS_PER_BANK 16 61#define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index c29d4c3926bf..10ce4a2854ab 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -90,7 +90,7 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
90 uint16_t ppdstat = 0, bridge_ctl = 0; 90 uint16_t ppdstat = 0, bridge_ctl = 0;
91 int pending = 0; 91 int pending = 0;
92 92
93 pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id); 93 pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
94 pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); 94 pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
95 pending = ppdstat & PCI_EXP_DEVSTA_TRPND; 95 pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
96 if (pending) { 96 if (pending) {
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 244d73378f0e..7ee93f881db6 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -52,6 +52,7 @@
52#include <linux/pci.h> 52#include <linux/pci.h>
53#include <linux/cdev.h> 53#include <linux/cdev.h>
54#include <linux/uaccess.h> 54#include <linux/uaccess.h>
55#include <linux/crypto.h>
55 56
56#include "adf_accel_devices.h" 57#include "adf_accel_devices.h"
57#include "adf_common_drv.h" 58#include "adf_common_drv.h"
@@ -487,4 +488,4 @@ module_exit(adf_unregister_ctl_device_driver);
487MODULE_LICENSE("Dual BSD/GPL"); 488MODULE_LICENSE("Dual BSD/GPL");
488MODULE_AUTHOR("Intel"); 489MODULE_AUTHOR("Intel");
489MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); 490MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
490MODULE_ALIAS("intel_qat"); 491MODULE_ALIAS_CRYPTO("intel_qat");
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index ae71555c0868..4a0a829d4500 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -129,12 +129,13 @@ struct adf_accel_dev *adf_devmgr_get_first(void)
129 * Function returns acceleration device associated with the given pci device. 129 * Function returns acceleration device associated with the given pci device.
130 * To be used by QAT device specific drivers. 130 * To be used by QAT device specific drivers.
131 * 131 *
132 * Return: pinter to accel_dev or NULL if not found. 132 * Return: pointer to accel_dev or NULL if not found.
133 */ 133 */
134struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) 134struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
135{ 135{
136 struct list_head *itr; 136 struct list_head *itr;
137 137
138 mutex_lock(&table_lock);
138 list_for_each(itr, &accel_table) { 139 list_for_each(itr, &accel_table) {
139 struct adf_accel_dev *ptr = 140 struct adf_accel_dev *ptr =
140 list_entry(itr, struct adf_accel_dev, list); 141 list_entry(itr, struct adf_accel_dev, list);
@@ -144,6 +145,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
144 return ptr; 145 return ptr;
145 } 146 }
146 } 147 }
148 mutex_unlock(&table_lock);
147 return NULL; 149 return NULL;
148} 150}
149EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); 151EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
@@ -152,6 +154,7 @@ struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
152{ 154{
153 struct list_head *itr; 155 struct list_head *itr;
154 156
157 mutex_lock(&table_lock);
155 list_for_each(itr, &accel_table) { 158 list_for_each(itr, &accel_table) {
156 struct adf_accel_dev *ptr = 159 struct adf_accel_dev *ptr =
157 list_entry(itr, struct adf_accel_dev, list); 160 list_entry(itr, struct adf_accel_dev, list);
@@ -161,6 +164,7 @@ struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
161 return ptr; 164 return ptr;
162 } 165 }
163 } 166 }
167 mutex_unlock(&table_lock);
164 return NULL; 168 return NULL;
165} 169}
166 170
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 9dd2cb72a4e8..7dd54aaee9fa 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -376,8 +376,9 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
376 return 0; 376 return 0;
377} 377}
378 378
379static void adf_enable_coalesc(struct adf_etr_bank_data *bank, 379static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
380 const char *section, uint32_t bank_num_in_accel) 380 const char *section,
381 uint32_t bank_num_in_accel)
381{ 382{
382 if (adf_get_cfg_int(bank->accel_dev, section, 383 if (adf_get_cfg_int(bank->accel_dev, section,
383 ADF_ETRMGR_COALESCE_TIMER_FORMAT, 384 ADF_ETRMGR_COALESCE_TIMER_FORMAT,
@@ -396,7 +397,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
396 struct adf_hw_device_data *hw_data = accel_dev->hw_device; 397 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
397 struct adf_etr_ring_data *ring; 398 struct adf_etr_ring_data *ring;
398 struct adf_etr_ring_data *tx_ring; 399 struct adf_etr_ring_data *tx_ring;
399 uint32_t i, coalesc_enabled; 400 uint32_t i, coalesc_enabled = 0;
400 401
401 memset(bank, 0, sizeof(*bank)); 402 memset(bank, 0, sizeof(*bank));
402 bank->bank_number = bank_num; 403 bank->bank_number = bank_num;
@@ -407,10 +408,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
407 /* Enable IRQ coalescing always. This will allow to use 408 /* Enable IRQ coalescing always. This will allow to use
408 * the optimised flag and coalesc register. 409 * the optimised flag and coalesc register.
409 * If it is disabled in the config file just use min time value */ 410 * If it is disabled in the config file just use min time value */
410 if (adf_get_cfg_int(accel_dev, "Accelerator0", 411 if ((adf_get_cfg_int(accel_dev, "Accelerator0",
411 ADF_ETRMGR_COALESCING_ENABLED_FORMAT, 412 ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
412 bank_num, &coalesc_enabled) && coalesc_enabled) 413 &coalesc_enabled) == 0) && coalesc_enabled)
413 adf_enable_coalesc(bank, "Accelerator0", bank_num); 414 adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
414 else 415 else
415 bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME; 416 bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
416 417
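
The adf_init_bank() change above works because adf_get_cfg_int() returns 0 on success (see the context at the top of this file's diff), so the enable check has to test for == 0 and coalesc_enabled has to be initialised in case the key is absent. A small stand-alone illustration of the corrected condition, with get_cfg_int() as a hypothetical stand-in:

#include <stdio.h>

/* Stand-in for adf_get_cfg_int(): returns 0 on success and writes the value,
 * non-zero when the key is absent (leaving *value untouched). */
static int get_cfg_int(int present, int val, unsigned int *value)
{
	if (!present)
		return -1;
	*value = val;
	return 0;
}

int main(void)
{
	unsigned int coalesc_enabled = 0;	/* initialised, as in the hunk */

	/* Old check treated the non-zero error return as "found"; the new
	 * check requires the key to be found (== 0) and explicitly enabled. */
	if ((get_cfg_int(1, 1, &coalesc_enabled) == 0) && coalesc_enabled)
		printf("use configured coalescing timer\n");
	else
		printf("fall back to minimum time value\n");
	return 0;
}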
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 91d88d676580..160c9a36c919 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -83,14 +83,14 @@
83#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M 83#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
84#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K 84#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
85 85
86/* Valid internal msg size values internal */ 86/* Valid internal msg size values */
87#define ADF_MSG_SIZE_32 0x01 87#define ADF_MSG_SIZE_32 0x01
88#define ADF_MSG_SIZE_64 0x02 88#define ADF_MSG_SIZE_64 0x02
89#define ADF_MSG_SIZE_128 0x04 89#define ADF_MSG_SIZE_128 0x04
90#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32 90#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
91#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128 91#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
92 92
93/* Size to bytes conversion macros for ring and msg values */ 93/* Size to bytes conversion macros for ring and msg size values */
94#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5) 94#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
95#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5) 95#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
96#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) 96#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
@@ -100,8 +100,11 @@
100#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \ 100#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
101 ADF_RING_SIZE_4K : SIZE) 101 ADF_RING_SIZE_4K : SIZE)
102#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) 102#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
103#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
104 SIZE) & ~0x4)
105/* Max outstanding requests */
103#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ 106#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
104 ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1) 107 ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
105#define BUILD_RING_CONFIG(size) \ 108#define BUILD_RING_CONFIG(size) \
106 ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \ 109 ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
107 | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ 110 | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
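
The new ADF_SIZE_TO_POW macro above turns the message-size encoding (0x01/0x02/0x04 for 32/64/128-byte messages) into the matching shift, so ADF_MAX_INFLIGHTS now comes out as ring size in bytes divided by message size in bytes, minus one. A quick user-space check of that arithmetic, with the macros copied from the hunk and an arbitrary ring-size encoding chosen only for the demonstration:

#include <stdio.h>

#define ADF_MSG_SIZE_32  0x01
#define ADF_MSG_SIZE_64  0x02
#define ADF_MSG_SIZE_128 0x04

/* Copied from the hunk above. */
#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
				SIZE) & ~0x4)
#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
	((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)

int main(void)
{
	int ring = 10;	/* 64 KB ring under the encoding above */
	int sizes[] = { ADF_MSG_SIZE_32, ADF_MSG_SIZE_64, ADF_MSG_SIZE_128 };

	for (int i = 0; i < 3; i++) {
		int msg = sizes[i];
		int bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring);

		/* The new macro should equal ring bytes / msg bytes - 1. */
		printf("msg %3d B: inflights %d (expected %d)\n",
		       ADF_MSG_SIZE_TO_BYTES(msg),
		       ADF_MAX_INFLIGHTS(ring, msg),
		       bytes / ADF_MSG_SIZE_TO_BYTES(msg) - 1);
	}
	return 0;
}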
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 9e9619cd4a79..19eea1c832ac 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -161,7 +161,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
161 __be64 *hash512_state_out; 161 __be64 *hash512_state_out;
162 int i, offset; 162 int i, offset;
163 163
164 memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64); 164 memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
165 shash->tfm = ctx->hash_tfm; 165 shash->tfm = ctx->hash_tfm;
166 shash->flags = 0x0; 166 shash->flags = 0x0;
167 167
@@ -174,13 +174,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
174 174
175 memcpy(ipad, buff, digest_size); 175 memcpy(ipad, buff, digest_size);
176 memcpy(opad, buff, digest_size); 176 memcpy(opad, buff, digest_size);
177 memset(ipad + digest_size, 0, block_size - digest_size); 177 memzero_explicit(ipad + digest_size, block_size - digest_size);
178 memset(opad + digest_size, 0, block_size - digest_size); 178 memzero_explicit(opad + digest_size, block_size - digest_size);
179 } else { 179 } else {
180 memcpy(ipad, auth_key, auth_keylen); 180 memcpy(ipad, auth_key, auth_keylen);
181 memcpy(opad, auth_key, auth_keylen); 181 memcpy(opad, auth_key, auth_keylen);
182 memset(ipad + auth_keylen, 0, block_size - auth_keylen); 182 memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
183 memset(opad + auth_keylen, 0, block_size - auth_keylen); 183 memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
184 } 184 }
185 185
186 for (i = 0; i < block_size; i++) { 186 for (i = 0; i < block_size; i++) {
@@ -254,6 +254,8 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
254 default: 254 default:
255 return -EFAULT; 255 return -EFAULT;
256 } 256 }
257 memzero_explicit(ipad, block_size);
258 memzero_explicit(opad, block_size);
257 return 0; 259 return 0;
258} 260}
259 261
@@ -466,7 +468,6 @@ static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
466 break; 468 break;
467 default: 469 default:
468 goto bad_key; 470 goto bad_key;
469 break;
470 } 471 }
471 472
472 if (qat_alg_init_enc_session(ctx, alg, &keys)) 473 if (qat_alg_init_enc_session(ctx, alg, &keys))
@@ -493,12 +494,12 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
493 if (ctx->enc_cd) { 494 if (ctx->enc_cd) {
494 /* rekeying */ 495 /* rekeying */
495 dev = &GET_DEV(ctx->inst->accel_dev); 496 dev = &GET_DEV(ctx->inst->accel_dev);
496 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); 497 memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
497 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); 498 memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
498 memset(&ctx->enc_fw_req_tmpl, 0, 499 memzero_explicit(&ctx->enc_fw_req_tmpl,
499 sizeof(struct icp_qat_fw_la_bulk_req)); 500 sizeof(struct icp_qat_fw_la_bulk_req));
500 memset(&ctx->dec_fw_req_tmpl, 0, 501 memzero_explicit(&ctx->dec_fw_req_tmpl,
501 sizeof(struct icp_qat_fw_la_bulk_req)); 502 sizeof(struct icp_qat_fw_la_bulk_req));
502 } else { 503 } else {
503 /* new key */ 504 /* new key */
504 int node = get_current_node(); 505 int node = get_current_node();
@@ -535,10 +536,12 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
535 return 0; 536 return 0;
536 537
537out_free_all: 538out_free_all:
539 memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
538 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 540 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
539 ctx->dec_cd, ctx->dec_cd_paddr); 541 ctx->dec_cd, ctx->dec_cd_paddr);
540 ctx->dec_cd = NULL; 542 ctx->dec_cd = NULL;
541out_free_enc: 543out_free_enc:
544 memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
542 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 545 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
543 ctx->enc_cd, ctx->enc_cd_paddr); 546 ctx->enc_cd, ctx->enc_cd_paddr);
544 ctx->enc_cd = NULL; 547 ctx->enc_cd = NULL;
@@ -836,7 +839,7 @@ static int qat_alg_init(struct crypto_tfm *tfm,
836{ 839{
837 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); 840 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
838 841
839 memset(ctx, '\0', sizeof(*ctx)); 842 memzero_explicit(ctx, sizeof(*ctx));
840 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); 843 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
841 if (IS_ERR(ctx->hash_tfm)) 844 if (IS_ERR(ctx->hash_tfm))
842 return -EFAULT; 845 return -EFAULT;
@@ -876,12 +879,16 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
876 return; 879 return;
877 880
878 dev = &GET_DEV(inst->accel_dev); 881 dev = &GET_DEV(inst->accel_dev);
879 if (ctx->enc_cd) 882 if (ctx->enc_cd) {
883 memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
880 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 884 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
881 ctx->enc_cd, ctx->enc_cd_paddr); 885 ctx->enc_cd, ctx->enc_cd_paddr);
882 if (ctx->dec_cd) 886 }
887 if (ctx->dec_cd) {
888 memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
883 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 889 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
884 ctx->dec_cd, ctx->dec_cd_paddr); 890 ctx->dec_cd, ctx->dec_cd_paddr);
891 }
885 qat_crypto_put_instance(inst); 892 qat_crypto_put_instance(inst);
886} 893}
887 894
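
Several qat_algs.c hunks above replace memset() with memzero_explicit() when wiping key material, because a plain memset of a buffer that is about to go out of scope or be freed can legally be removed by the compiler as a dead store. A user-space sketch of the idea, using a volatile function pointer as one common way to defeat that optimisation; the in-kernel memzero_explicit() relies on a compiler barrier rather than this trick.

#include <stdio.h>
#include <string.h>

/* Volatile function pointer: the compiler cannot prove the call is dead,
 * so the zeroing of 'key' is not eliminated even though 'key' is dying. */
static void *(*const volatile memset_explicit)(void *, int, size_t) = memset;

static void handle_key(void)
{
	unsigned char key[32];

	/* ... use the key material ... */
	memcpy(key, "0123456789abcdef0123456789abcdef", sizeof(key));

	/* A plain memset(key, 0, sizeof(key)) here could be dropped as a
	 * dead store; the indirect call below cannot. */
	memset_explicit(key, 0, sizeof(key));
}

int main(void)
{
	handle_key();
	puts("key wiped before the stack frame is reused");
	return 0;
}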
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 9b8a31521ff3..b818c19713bf 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -679,7 +679,8 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
679 struct icp_qat_fw_loader_handle *handle; 679 struct icp_qat_fw_loader_handle *handle;
680 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; 680 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
681 struct adf_hw_device_data *hw_data = accel_dev->hw_device; 681 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
682 struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR]; 682 struct adf_bar *bar =
683 &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
683 684
684 handle = kzalloc(sizeof(*handle), GFP_KERNEL); 685 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
685 if (!handle) 686 if (!handle)
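
The qat_hal.c hunk above replaces the hard-coded ADF_DH895XCC_PMISC_BAR index with a per-device hook, hw_data->get_misc_bar_id(), so the common code no longer assumes the DH895xCC BAR layout (the BAR defines correspondingly move into the device-specific header shown below). A minimal model of that design, with hypothetical names and the DH895xCC value of 1 taken from the header diff:

#include <stdio.h>

/* Per-device operations table: common code asks the device which BAR
 * holds the miscellaneous registers instead of hard-coding an index. */
struct hw_device_data {
	const char *name;
	unsigned int (*get_misc_bar_id)(struct hw_device_data *self);
};

static unsigned int dh895xcc_get_misc_bar_id(struct hw_device_data *self)
{
	(void)self;
	return 1;	/* ADF_DH895XCC_PMISC_BAR */
}

static void common_init(struct hw_device_data *hw_data)
{
	unsigned int bar = hw_data->get_misc_bar_id(hw_data);

	printf("%s: mapping misc BAR %u\n", hw_data->name, bar);
}

int main(void)
{
	struct hw_device_data dh895xcc = {
		.name = "dh895xcc",
		.get_misc_bar_id = dh895xcc_get_misc_bar_id,
	};

	common_init(&dh895xcc);
	return 0;
}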
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 65dd1ff93d3b..01e0be21e93a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -48,6 +48,8 @@
48#define ADF_DH895x_HW_DATA_H_ 48#define ADF_DH895x_HW_DATA_H_
49 49
50/* PCIe configuration space */ 50/* PCIe configuration space */
51#define ADF_DH895XCC_PMISC_BAR 1
52#define ADF_DH895XCC_ETR_BAR 2
51#define ADF_DH895XCC_RX_RINGS_OFFSET 8 53#define ADF_DH895XCC_RX_RINGS_OFFSET 8
52#define ADF_DH895XCC_TX_RINGS_MASK 0xFF 54#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
53#define ADF_DH895XCC_FUSECTL_OFFSET 0x40 55#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d96ee21b9b77..fe8f89697ad8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -186,10 +186,8 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
186 accel_dev->accel_pci_dev.msix_entries.names = names; 186 accel_dev->accel_pci_dev.msix_entries.names = names;
187 return 0; 187 return 0;
188err: 188err:
189 for (i = 0; i < msix_num_entries; i++) { 189 for (i = 0; i < msix_num_entries; i++)
190 if (*(names + i)) 190 kfree(*(names + i));
191 kfree(*(names + i));
192 }
193 kfree(entries); 191 kfree(entries);
194 kfree(names); 192 kfree(names);
195 return -ENOMEM; 193 return -ENOMEM;
@@ -203,10 +201,8 @@ static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
203 int i; 201 int i;
204 202
205 kfree(accel_dev->accel_pci_dev.msix_entries.entries); 203 kfree(accel_dev->accel_pci_dev.msix_entries.entries);
206 for (i = 0; i < msix_num_entries; i++) { 204 for (i = 0; i < msix_num_entries; i++)
207 if (*(names + i)) 205 kfree(*(names + i));
208 kfree(*(names + i));
209 }
210 kfree(names); 206 kfree(names);
211} 207}
212 208
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 164e1ec624e3..579f539e5975 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Support for SAHARA cryptographic accelerator. 4 * Support for SAHARA cryptographic accelerator.
5 * 5 *
6 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
6 * Copyright (c) 2013 Vista Silicon S.L. 7 * Copyright (c) 2013 Vista Silicon S.L.
7 * Author: Javier Martin <javier.martin@vista-silicon.com> 8 * Author: Javier Martin <javier.martin@vista-silicon.com>
8 * 9 *
@@ -15,6 +16,10 @@
15 16
16#include <crypto/algapi.h> 17#include <crypto/algapi.h>
17#include <crypto/aes.h> 18#include <crypto/aes.h>
19#include <crypto/hash.h>
20#include <crypto/internal/hash.h>
21#include <crypto/scatterwalk.h>
22#include <crypto/sha.h>
18 23
19#include <linux/clk.h> 24#include <linux/clk.h>
20#include <linux/crypto.h> 25#include <linux/crypto.h>
@@ -22,12 +27,19 @@
22#include <linux/io.h> 27#include <linux/io.h>
23#include <linux/irq.h> 28#include <linux/irq.h>
24#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/kthread.h>
25#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/mutex.h>
26#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/of_device.h>
27#include <linux/platform_device.h> 35#include <linux/platform_device.h>
28 36
37#define SHA_BUFFER_LEN PAGE_SIZE
38#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
39
29#define SAHARA_NAME "sahara" 40#define SAHARA_NAME "sahara"
30#define SAHARA_VERSION_3 3 41#define SAHARA_VERSION_3 3
42#define SAHARA_VERSION_4 4
31#define SAHARA_TIMEOUT_MS 1000 43#define SAHARA_TIMEOUT_MS 1000
32#define SAHARA_MAX_HW_DESC 2 44#define SAHARA_MAX_HW_DESC 2
33#define SAHARA_MAX_HW_LINK 20 45#define SAHARA_MAX_HW_LINK 20
@@ -36,7 +48,6 @@
36#define FLAGS_ENCRYPT BIT(0) 48#define FLAGS_ENCRYPT BIT(0)
37#define FLAGS_CBC BIT(1) 49#define FLAGS_CBC BIT(1)
38#define FLAGS_NEW_KEY BIT(3) 50#define FLAGS_NEW_KEY BIT(3)
39#define FLAGS_BUSY 4
40 51
41#define SAHARA_HDR_BASE 0x00800000 52#define SAHARA_HDR_BASE 0x00800000
42#define SAHARA_HDR_SKHA_ALG_AES 0 53#define SAHARA_HDR_SKHA_ALG_AES 0
@@ -50,6 +61,23 @@
50#define SAHARA_HDR_CHA_MDHA (2 << 28) 61#define SAHARA_HDR_CHA_MDHA (2 << 28)
51#define SAHARA_HDR_PARITY_BIT (1 << 31) 62#define SAHARA_HDR_PARITY_BIT (1 << 31)
52 63
64#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
65#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
66#define SAHARA_HDR_MDHA_HASH 0xA0850000
67#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
68#define SAHARA_HDR_MDHA_ALG_SHA1 0
69#define SAHARA_HDR_MDHA_ALG_MD5 1
70#define SAHARA_HDR_MDHA_ALG_SHA256 2
71#define SAHARA_HDR_MDHA_ALG_SHA224 3
72#define SAHARA_HDR_MDHA_PDATA (1 << 2)
73#define SAHARA_HDR_MDHA_HMAC (1 << 3)
74#define SAHARA_HDR_MDHA_INIT (1 << 5)
75#define SAHARA_HDR_MDHA_IPAD (1 << 6)
76#define SAHARA_HDR_MDHA_OPAD (1 << 7)
77#define SAHARA_HDR_MDHA_SWAP (1 << 8)
78#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
79#define SAHARA_HDR_MDHA_SSL (1 << 10)
80
53/* SAHARA can only process one request at a time */ 81/* SAHARA can only process one request at a time */
54#define SAHARA_QUEUE_LENGTH 1 82#define SAHARA_QUEUE_LENGTH 1
55 83
@@ -117,31 +145,74 @@ struct sahara_hw_link {
117}; 145};
118 146
119struct sahara_ctx { 147struct sahara_ctx {
120 struct sahara_dev *dev;
121 unsigned long flags; 148 unsigned long flags;
149
150 /* AES-specific context */
122 int keylen; 151 int keylen;
123 u8 key[AES_KEYSIZE_128]; 152 u8 key[AES_KEYSIZE_128];
124 struct crypto_ablkcipher *fallback; 153 struct crypto_ablkcipher *fallback;
154
155 /* SHA-specific context */
156 struct crypto_shash *shash_fallback;
125}; 157};
126 158
127struct sahara_aes_reqctx { 159struct sahara_aes_reqctx {
128 unsigned long mode; 160 unsigned long mode;
129}; 161};
130 162
163/*
164 * struct sahara_sha_reqctx - private data per request
165 * @buf: holds data for requests smaller than block_size
166 * @rembuf: used to prepare one block_size-aligned request
167 * @context: hw-specific context for request. Digest is extracted from this
168 * @mode: specifies what type of hw-descriptor needs to be built
169 * @digest_size: length of digest for this request
170 * @context_size: length of hw-context for this request.
171 * Always digest_size + 4
172 * @buf_cnt: number of bytes saved in buf
173 * @sg_in_idx: number of hw links
174 * @in_sg: scatterlist for input data
175 * @in_sg_chain: scatterlists for chained input data
176 * @in_sg_chained: specifies if chained scatterlists are used or not
177 * @total: total number of bytes for transfer
178 * @last: is this the last block
179 * @first: is this the first block
180 * @active: inside a transfer
181 */
182struct sahara_sha_reqctx {
183 u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
184 u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
185 u8 context[SHA256_DIGEST_SIZE + 4];
186 struct mutex mutex;
187 unsigned int mode;
188 unsigned int digest_size;
189 unsigned int context_size;
190 unsigned int buf_cnt;
191 unsigned int sg_in_idx;
192 struct scatterlist *in_sg;
193 struct scatterlist in_sg_chain[2];
194 bool in_sg_chained;
195 size_t total;
196 unsigned int last;
197 unsigned int first;
198 unsigned int active;
199};
200
131struct sahara_dev { 201struct sahara_dev {
132 struct device *device; 202 struct device *device;
203 unsigned int version;
133 void __iomem *regs_base; 204 void __iomem *regs_base;
134 struct clk *clk_ipg; 205 struct clk *clk_ipg;
135 struct clk *clk_ahb; 206 struct clk *clk_ahb;
207 struct mutex queue_mutex;
208 struct task_struct *kthread;
209 struct completion dma_completion;
136 210
137 struct sahara_ctx *ctx; 211 struct sahara_ctx *ctx;
138 spinlock_t lock; 212 spinlock_t lock;
139 struct crypto_queue queue; 213 struct crypto_queue queue;
140 unsigned long flags; 214 unsigned long flags;
141 215
142 struct tasklet_struct done_task;
143 struct tasklet_struct queue_task;
144
145 struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC]; 216 struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
146 dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC]; 217 dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
147 218
@@ -151,10 +222,12 @@ struct sahara_dev {
151 u8 *iv_base; 222 u8 *iv_base;
152 dma_addr_t iv_phys_base; 223 dma_addr_t iv_phys_base;
153 224
225 u8 *context_base;
226 dma_addr_t context_phys_base;
227
154 struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK]; 228 struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
155 dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK]; 229 dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
156 230
157 struct ablkcipher_request *req;
158 size_t total; 231 size_t total;
159 struct scatterlist *in_sg; 232 struct scatterlist *in_sg;
160 unsigned int nb_in_sg; 233 unsigned int nb_in_sg;
@@ -162,7 +235,6 @@ struct sahara_dev {
162 unsigned int nb_out_sg; 235 unsigned int nb_out_sg;
163 236
164 u32 error; 237 u32 error;
165 struct timer_list watchdog;
166}; 238};
167 239
168static struct sahara_dev *dev_ptr; 240static struct sahara_dev *dev_ptr;
@@ -401,34 +473,6 @@ static void sahara_dump_links(struct sahara_dev *dev)
401 dev_dbg(dev->device, "\n"); 473 dev_dbg(dev->device, "\n");
402} 474}
403 475
404static void sahara_aes_done_task(unsigned long data)
405{
406 struct sahara_dev *dev = (struct sahara_dev *)data;
407
408 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
409 DMA_TO_DEVICE);
410 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
411 DMA_FROM_DEVICE);
412
413 spin_lock(&dev->lock);
414 clear_bit(FLAGS_BUSY, &dev->flags);
415 spin_unlock(&dev->lock);
416
417 dev->req->base.complete(&dev->req->base, dev->error);
418}
419
420static void sahara_watchdog(unsigned long data)
421{
422 struct sahara_dev *dev = (struct sahara_dev *)data;
423 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
424 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
425
426 sahara_decode_status(dev, stat);
427 sahara_decode_error(dev, err);
428 dev->error = -ETIMEDOUT;
429 sahara_aes_done_task(data);
430}
431
432static int sahara_hw_descriptor_create(struct sahara_dev *dev) 476static int sahara_hw_descriptor_create(struct sahara_dev *dev)
433{ 477{
434 struct sahara_ctx *ctx = dev->ctx; 478 struct sahara_ctx *ctx = dev->ctx;
@@ -512,9 +556,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
512 sahara_dump_descriptors(dev); 556 sahara_dump_descriptors(dev);
513 sahara_dump_links(dev); 557 sahara_dump_links(dev);
514 558
515 /* Start processing descriptor chain. */
516 mod_timer(&dev->watchdog,
517 jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
518 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR); 559 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
519 560
520 return 0; 561 return 0;
@@ -529,37 +570,19 @@ unmap_in:
529 return -EINVAL; 570 return -EINVAL;
530} 571}
531 572
532static void sahara_aes_queue_task(unsigned long data) 573static int sahara_aes_process(struct ablkcipher_request *req)
533{ 574{
534 struct sahara_dev *dev = (struct sahara_dev *)data; 575 struct sahara_dev *dev = dev_ptr;
535 struct crypto_async_request *async_req, *backlog;
536 struct sahara_ctx *ctx; 576 struct sahara_ctx *ctx;
537 struct sahara_aes_reqctx *rctx; 577 struct sahara_aes_reqctx *rctx;
538 struct ablkcipher_request *req;
539 int ret; 578 int ret;
540 579
541 spin_lock(&dev->lock);
542 backlog = crypto_get_backlog(&dev->queue);
543 async_req = crypto_dequeue_request(&dev->queue);
544 if (!async_req)
545 clear_bit(FLAGS_BUSY, &dev->flags);
546 spin_unlock(&dev->lock);
547
548 if (!async_req)
549 return;
550
551 if (backlog)
552 backlog->complete(backlog, -EINPROGRESS);
553
554 req = ablkcipher_request_cast(async_req);
555
556 /* Request is ready to be dispatched by the device */ 580 /* Request is ready to be dispatched by the device */
557 dev_dbg(dev->device, 581 dev_dbg(dev->device,
558 "dispatch request (nbytes=%d, src=%p, dst=%p)\n", 582 "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
559 req->nbytes, req->src, req->dst); 583 req->nbytes, req->src, req->dst);
560 584
561 /* assign new request to device */ 585 /* assign new request to device */
562 dev->req = req;
563 dev->total = req->nbytes; 586 dev->total = req->nbytes;
564 dev->in_sg = req->src; 587 dev->in_sg = req->src;
565 dev->out_sg = req->dst; 588 dev->out_sg = req->dst;
@@ -573,16 +596,25 @@ static void sahara_aes_queue_task(unsigned long data)
573 memcpy(dev->iv_base, req->info, AES_KEYSIZE_128); 596 memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
574 597
575 /* assign new context to device */ 598 /* assign new context to device */
576 ctx->dev = dev;
577 dev->ctx = ctx; 599 dev->ctx = ctx;
578 600
601 reinit_completion(&dev->dma_completion);
602
579 ret = sahara_hw_descriptor_create(dev); 603 ret = sahara_hw_descriptor_create(dev);
580 if (ret < 0) { 604
581 spin_lock(&dev->lock); 605 ret = wait_for_completion_timeout(&dev->dma_completion,
582 clear_bit(FLAGS_BUSY, &dev->flags); 606 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
583 spin_unlock(&dev->lock); 607 if (!ret) {
584 dev->req->base.complete(&dev->req->base, ret); 608 dev_err(dev->device, "AES timeout\n");
609 return -ETIMEDOUT;
585 } 610 }
611
612 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
613 DMA_TO_DEVICE);
614 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
615 DMA_FROM_DEVICE);
616
617 return 0;
586} 618}
587 619
588static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 620static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -624,12 +656,9 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
624 656
625static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode) 657static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
626{ 658{
627 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
628 crypto_ablkcipher_reqtfm(req));
629 struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req); 659 struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
630 struct sahara_dev *dev = dev_ptr; 660 struct sahara_dev *dev = dev_ptr;
631 int err = 0; 661 int err = 0;
632 int busy;
633 662
634 dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n", 663 dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
635 req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); 664 req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
@@ -640,16 +669,13 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
640 return -EINVAL; 669 return -EINVAL;
641 } 670 }
642 671
643 ctx->dev = dev;
644
645 rctx->mode = mode; 672 rctx->mode = mode;
646 spin_lock_bh(&dev->lock); 673
674 mutex_lock(&dev->queue_mutex);
647 err = ablkcipher_enqueue_request(&dev->queue, req); 675 err = ablkcipher_enqueue_request(&dev->queue, req);
648 busy = test_and_set_bit(FLAGS_BUSY, &dev->flags); 676 mutex_unlock(&dev->queue_mutex);
649 spin_unlock_bh(&dev->lock);
650 677
651 if (!busy) 678 wake_up_process(dev->kthread);
652 tasklet_schedule(&dev->queue_task);
653 679
654 return err; 680 return err;
655} 681}
@@ -752,6 +778,484 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
752 ctx->fallback = NULL; 778 ctx->fallback = NULL;
753} 779}
754 780
781static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
782 struct sahara_sha_reqctx *rctx)
783{
784 u32 hdr = 0;
785
786 hdr = rctx->mode;
787
788 if (rctx->first) {
789 hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
790 hdr |= SAHARA_HDR_MDHA_INIT;
791 } else {
792 hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
793 }
794
795 if (rctx->last)
796 hdr |= SAHARA_HDR_MDHA_PDATA;
797
798 if (hweight_long(hdr) % 2 == 0)
799 hdr |= SAHARA_HDR_PARITY_BIT;
800
801 return hdr;
802}
803
804static int sahara_sha_hw_links_create(struct sahara_dev *dev,
805 struct sahara_sha_reqctx *rctx,
806 int start)
807{
808 struct scatterlist *sg;
809 unsigned int i;
810 int ret;
811
812 dev->in_sg = rctx->in_sg;
813
814 dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
815 if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
816 dev_err(dev->device, "not enough hw links (%d)\n",
817 dev->nb_in_sg + dev->nb_out_sg);
818 return -EINVAL;
819 }
820
821 if (rctx->in_sg_chained) {
822 i = start;
823 sg = dev->in_sg;
824 while (sg) {
825 ret = dma_map_sg(dev->device, sg, 1,
826 DMA_TO_DEVICE);
827 if (!ret)
828 return -EFAULT;
829
830 dev->hw_link[i]->len = sg->length;
831 dev->hw_link[i]->p = sg->dma_address;
832 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
833 sg = sg_next(sg);
834 i += 1;
835 }
836 dev->hw_link[i-1]->next = 0;
837 } else {
838 sg = dev->in_sg;
839 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
840 DMA_TO_DEVICE);
841 if (!ret)
842 return -EFAULT;
843
844 for (i = start; i < dev->nb_in_sg + start; i++) {
845 dev->hw_link[i]->len = sg->length;
846 dev->hw_link[i]->p = sg->dma_address;
847 if (i == (dev->nb_in_sg + start - 1)) {
848 dev->hw_link[i]->next = 0;
849 } else {
850 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
851 sg = sg_next(sg);
852 }
853 }
854 }
855
856 return i;
857}
858
859static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
860 struct sahara_sha_reqctx *rctx,
861 struct ahash_request *req,
862 int index)
863{
864 unsigned result_len;
865 int i = index;
866
867 if (rctx->first)
 868 /* Create initial descriptor: #8 */
869 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
870 else
871 /* Create hash descriptor: #10. Must follow #6. */
872 dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
873
874 dev->hw_desc[index]->len1 = rctx->total;
875 if (dev->hw_desc[index]->len1 == 0) {
876 /* if len1 is 0, p1 must be 0, too */
877 dev->hw_desc[index]->p1 = 0;
878 rctx->sg_in_idx = 0;
879 } else {
880 /* Create input links */
881 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
882 i = sahara_sha_hw_links_create(dev, rctx, index);
883
884 rctx->sg_in_idx = index;
885 if (i < 0)
886 return i;
887 }
888
889 dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
890
891 /* Save the context for the next operation */
892 result_len = rctx->context_size;
893 dev->hw_link[i]->p = dev->context_phys_base;
894
895 dev->hw_link[i]->len = result_len;
896 dev->hw_desc[index]->len2 = result_len;
897
898 dev->hw_link[i]->next = 0;
899
900 return 0;
901}
902
903/*
904 * Load descriptor aka #6
905 *
906 * To load a previously saved context back to the MDHA unit
907 *
908 * p1: Saved Context
909 * p2: NULL
910 *
911 */
912static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
913 struct sahara_sha_reqctx *rctx,
914 struct ahash_request *req,
915 int index)
916{
917 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
918
919 dev->hw_desc[index]->len1 = rctx->context_size;
920 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
921 dev->hw_desc[index]->len2 = 0;
922 dev->hw_desc[index]->p2 = 0;
923
924 dev->hw_link[index]->len = rctx->context_size;
925 dev->hw_link[index]->p = dev->context_phys_base;
926 dev->hw_link[index]->next = 0;
927
928 return 0;
929}
930
931static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
932{
933 if (!sg || !sg->length)
934 return nbytes;
935
936 while (nbytes && sg) {
937 if (nbytes <= sg->length) {
938 sg->length = nbytes;
939 sg_mark_end(sg);
940 break;
941 }
942 nbytes -= sg->length;
943 sg = scatterwalk_sg_next(sg);
944 }
945
946 return nbytes;
947}
948
949static int sahara_sha_prepare_request(struct ahash_request *req)
950{
951 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
952 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
953 unsigned int hash_later;
954 unsigned int block_size;
955 unsigned int len;
956
957 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
958
959 /* append bytes from previous operation */
960 len = rctx->buf_cnt + req->nbytes;
961
962 /* only the last transfer can be padded in hardware */
963 if (!rctx->last && (len < block_size)) {
 964 /* too little data, save for next operation */
965 scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
966 0, req->nbytes, 0);
967 rctx->buf_cnt += req->nbytes;
968
969 return 0;
970 }
971
972 /* add data from previous operation first */
973 if (rctx->buf_cnt)
974 memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
975
976 /* data must always be a multiple of block_size */
977 hash_later = rctx->last ? 0 : len & (block_size - 1);
978 if (hash_later) {
979 unsigned int offset = req->nbytes - hash_later;
980 /* Save remaining bytes for later use */
981 scatterwalk_map_and_copy(rctx->buf, req->src, offset,
982 hash_later, 0);
983 }
984
985 /* nbytes should now be multiple of blocksize */
986 req->nbytes = req->nbytes - hash_later;
987
988 sahara_walk_and_recalc(req->src, req->nbytes);
989
990 /* have data from previous operation and current */
991 if (rctx->buf_cnt && req->nbytes) {
992 sg_init_table(rctx->in_sg_chain, 2);
993 sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
994
995 scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
996
997 rctx->total = req->nbytes + rctx->buf_cnt;
998 rctx->in_sg = rctx->in_sg_chain;
999
1000 rctx->in_sg_chained = true;
1001 req->src = rctx->in_sg_chain;
1002 /* only data from previous operation */
1003 } else if (rctx->buf_cnt) {
1004 if (req->src)
1005 rctx->in_sg = req->src;
1006 else
1007 rctx->in_sg = rctx->in_sg_chain;
1008 /* buf was copied into rembuf above */
1009 sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
1010 rctx->total = rctx->buf_cnt;
1011 rctx->in_sg_chained = false;
1012 /* no data from previous operation */
1013 } else {
1014 rctx->in_sg = req->src;
1015 rctx->total = req->nbytes;
1016 req->src = rctx->in_sg;
1017 rctx->in_sg_chained = false;
1018 }
1019
1020 /* on next call, we only have the remaining data in the buffer */
1021 rctx->buf_cnt = hash_later;
1022
1023 return -EINPROGRESS;
1024}
1025
1026static void sahara_sha_unmap_sg(struct sahara_dev *dev,
1027 struct sahara_sha_reqctx *rctx)
1028{
1029 struct scatterlist *sg;
1030
1031 if (rctx->in_sg_chained) {
1032 sg = dev->in_sg;
1033 while (sg) {
1034 dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
1035 sg = sg_next(sg);
1036 }
1037 } else {
1038 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1039 DMA_TO_DEVICE);
1040 }
1041}
1042
1043static int sahara_sha_process(struct ahash_request *req)
1044{
1045 struct sahara_dev *dev = dev_ptr;
1046 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1047 int ret = -EINPROGRESS;
1048
1049 ret = sahara_sha_prepare_request(req);
1050 if (!ret)
1051 return ret;
1052
1053 if (rctx->first) {
1054 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
1055 dev->hw_desc[0]->next = 0;
1056 rctx->first = 0;
1057 } else {
1058 memcpy(dev->context_base, rctx->context, rctx->context_size);
1059
1060 sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1061 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1062 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1063 dev->hw_desc[1]->next = 0;
1064 }
1065
1066 sahara_dump_descriptors(dev);
1067 sahara_dump_links(dev);
1068
1069 reinit_completion(&dev->dma_completion);
1070
1071 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1072
1073 ret = wait_for_completion_timeout(&dev->dma_completion,
1074 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1075 if (!ret) {
1076 dev_err(dev->device, "SHA timeout\n");
1077 return -ETIMEDOUT;
1078 }
1079
1080 if (rctx->sg_in_idx)
1081 sahara_sha_unmap_sg(dev, rctx);
1082
1083 memcpy(rctx->context, dev->context_base, rctx->context_size);
1084
1085 if (req->result)
1086 memcpy(req->result, rctx->context, rctx->digest_size);
1087
1088 return 0;
1089}
1090
1091static int sahara_queue_manage(void *data)
1092{
1093 struct sahara_dev *dev = (struct sahara_dev *)data;
1094 struct crypto_async_request *async_req;
1095 int ret = 0;
1096
1097 do {
1098 __set_current_state(TASK_INTERRUPTIBLE);
1099
1100 mutex_lock(&dev->queue_mutex);
1101 async_req = crypto_dequeue_request(&dev->queue);
1102 mutex_unlock(&dev->queue_mutex);
1103
1104 if (async_req) {
1105 if (crypto_tfm_alg_type(async_req->tfm) ==
1106 CRYPTO_ALG_TYPE_AHASH) {
1107 struct ahash_request *req =
1108 ahash_request_cast(async_req);
1109
1110 ret = sahara_sha_process(req);
1111 } else {
1112 struct ablkcipher_request *req =
1113 ablkcipher_request_cast(async_req);
1114
1115 ret = sahara_aes_process(req);
1116 }
1117
1118 async_req->complete(async_req, ret);
1119
1120 continue;
1121 }
1122
1123 schedule();
1124 } while (!kthread_should_stop());
1125
1126 return 0;
1127}
1128
1129static int sahara_sha_enqueue(struct ahash_request *req, int last)
1130{
1131 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1132 struct sahara_dev *dev = dev_ptr;
1133 int ret;
1134
1135 if (!req->nbytes && !last)
1136 return 0;
1137
1138 mutex_lock(&rctx->mutex);
1139 rctx->last = last;
1140
1141 if (!rctx->active) {
1142 rctx->active = 1;
1143 rctx->first = 1;
1144 }
1145
1146 mutex_lock(&dev->queue_mutex);
1147 ret = crypto_enqueue_request(&dev->queue, &req->base);
1148 mutex_unlock(&dev->queue_mutex);
1149
1150 wake_up_process(dev->kthread);
1151 mutex_unlock(&rctx->mutex);
1152
1153 return ret;
1154}
1155
1156static int sahara_sha_init(struct ahash_request *req)
1157{
1158 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1159 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1160
1161 memset(rctx, 0, sizeof(*rctx));
1162
1163 switch (crypto_ahash_digestsize(tfm)) {
1164 case SHA1_DIGEST_SIZE:
1165 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1166 rctx->digest_size = SHA1_DIGEST_SIZE;
1167 break;
1168 case SHA256_DIGEST_SIZE:
1169 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1170 rctx->digest_size = SHA256_DIGEST_SIZE;
1171 break;
1172 default:
1173 return -EINVAL;
1174 }
1175
1176 rctx->context_size = rctx->digest_size + 4;
1177 rctx->active = 0;
1178
1179 mutex_init(&rctx->mutex);
1180
1181 return 0;
1182}
1183
1184static int sahara_sha_update(struct ahash_request *req)
1185{
1186 return sahara_sha_enqueue(req, 0);
1187}
1188
1189static int sahara_sha_final(struct ahash_request *req)
1190{
1191 req->nbytes = 0;
1192 return sahara_sha_enqueue(req, 1);
1193}
1194
1195static int sahara_sha_finup(struct ahash_request *req)
1196{
1197 return sahara_sha_enqueue(req, 1);
1198}
1199
1200static int sahara_sha_digest(struct ahash_request *req)
1201{
1202 sahara_sha_init(req);
1203
1204 return sahara_sha_finup(req);
1205}
1206
1207static int sahara_sha_export(struct ahash_request *req, void *out)
1208{
1209 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1210 struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
1211 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1212
1213 memcpy(out, ctx, sizeof(struct sahara_ctx));
 1214 memcpy(out + sizeof(struct sahara_ctx), rctx,
1215 sizeof(struct sahara_sha_reqctx));
1216
1217 return 0;
1218}
1219
1220static int sahara_sha_import(struct ahash_request *req, const void *in)
1221{
1222 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1223 struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
1224 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1225
1226 memcpy(ctx, in, sizeof(struct sahara_ctx));
 1227 memcpy(rctx, in + sizeof(struct sahara_ctx),
1228 sizeof(struct sahara_sha_reqctx));
1229
1230 return 0;
1231}
1232
1233static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1234{
1235 const char *name = crypto_tfm_alg_name(tfm);
1236 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1237
1238 ctx->shash_fallback = crypto_alloc_shash(name, 0,
1239 CRYPTO_ALG_NEED_FALLBACK);
1240 if (IS_ERR(ctx->shash_fallback)) {
1241 pr_err("Error allocating fallback algo %s\n", name);
1242 return PTR_ERR(ctx->shash_fallback);
1243 }
1244 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1245 sizeof(struct sahara_sha_reqctx) +
1246 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1247
1248 return 0;
1249}
1250
1251static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
1252{
1253 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1254
1255 crypto_free_shash(ctx->shash_fallback);
1256 ctx->shash_fallback = NULL;
1257}
1258
755static struct crypto_alg aes_algs[] = { 1259static struct crypto_alg aes_algs[] = {
756{ 1260{
757 .cra_name = "ecb(aes)", 1261 .cra_name = "ecb(aes)",
@@ -797,14 +1301,66 @@ static struct crypto_alg aes_algs[] = {
797} 1301}
798}; 1302};
799 1303
1304static struct ahash_alg sha_v3_algs[] = {
1305{
1306 .init = sahara_sha_init,
1307 .update = sahara_sha_update,
1308 .final = sahara_sha_final,
1309 .finup = sahara_sha_finup,
1310 .digest = sahara_sha_digest,
1311 .export = sahara_sha_export,
1312 .import = sahara_sha_import,
1313 .halg.digestsize = SHA1_DIGEST_SIZE,
1314 .halg.base = {
1315 .cra_name = "sha1",
1316 .cra_driver_name = "sahara-sha1",
1317 .cra_priority = 300,
1318 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1319 CRYPTO_ALG_ASYNC |
1320 CRYPTO_ALG_NEED_FALLBACK,
1321 .cra_blocksize = SHA1_BLOCK_SIZE,
1322 .cra_ctxsize = sizeof(struct sahara_ctx),
1323 .cra_alignmask = 0,
1324 .cra_module = THIS_MODULE,
1325 .cra_init = sahara_sha_cra_init,
1326 .cra_exit = sahara_sha_cra_exit,
1327 }
1328},
1329};
1330
1331static struct ahash_alg sha_v4_algs[] = {
1332{
1333 .init = sahara_sha_init,
1334 .update = sahara_sha_update,
1335 .final = sahara_sha_final,
1336 .finup = sahara_sha_finup,
1337 .digest = sahara_sha_digest,
1338 .export = sahara_sha_export,
1339 .import = sahara_sha_import,
1340 .halg.digestsize = SHA256_DIGEST_SIZE,
1341 .halg.base = {
1342 .cra_name = "sha256",
1343 .cra_driver_name = "sahara-sha256",
1344 .cra_priority = 300,
1345 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1346 CRYPTO_ALG_ASYNC |
1347 CRYPTO_ALG_NEED_FALLBACK,
1348 .cra_blocksize = SHA256_BLOCK_SIZE,
1349 .cra_ctxsize = sizeof(struct sahara_ctx),
1350 .cra_alignmask = 0,
1351 .cra_module = THIS_MODULE,
1352 .cra_init = sahara_sha_cra_init,
1353 .cra_exit = sahara_sha_cra_exit,
1354 }
1355},
1356};
1357
800static irqreturn_t sahara_irq_handler(int irq, void *data) 1358static irqreturn_t sahara_irq_handler(int irq, void *data)
801{ 1359{
802 struct sahara_dev *dev = (struct sahara_dev *)data; 1360 struct sahara_dev *dev = (struct sahara_dev *)data;
803 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS); 1361 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
804 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); 1362 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
805 1363
806 del_timer(&dev->watchdog);
807
808 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR, 1364 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
809 SAHARA_REG_CMD); 1365 SAHARA_REG_CMD);
810 1366
@@ -819,7 +1375,7 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
819 dev->error = -EINVAL; 1375 dev->error = -EINVAL;
820 } 1376 }
821 1377
822 tasklet_schedule(&dev->done_task); 1378 complete(&dev->dma_completion);
823 1379
824 return IRQ_HANDLED; 1380 return IRQ_HANDLED;
825} 1381}
@@ -827,7 +1383,8 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
827 1383
828static int sahara_register_algs(struct sahara_dev *dev) 1384static int sahara_register_algs(struct sahara_dev *dev)
829{ 1385{
830 int err, i, j; 1386 int err;
1387 unsigned int i, j, k, l;
831 1388
832 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { 1389 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
833 INIT_LIST_HEAD(&aes_algs[i].cra_list); 1390 INIT_LIST_HEAD(&aes_algs[i].cra_list);
@@ -836,8 +1393,29 @@ static int sahara_register_algs(struct sahara_dev *dev)
836 goto err_aes_algs; 1393 goto err_aes_algs;
837 } 1394 }
838 1395
1396 for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1397 err = crypto_register_ahash(&sha_v3_algs[k]);
1398 if (err)
1399 goto err_sha_v3_algs;
1400 }
1401
1402 if (dev->version > SAHARA_VERSION_3)
1403 for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1404 err = crypto_register_ahash(&sha_v4_algs[l]);
1405 if (err)
1406 goto err_sha_v4_algs;
1407 }
1408
839 return 0; 1409 return 0;
840 1410
1411err_sha_v4_algs:
1412 for (j = 0; j < l; j++)
1413 crypto_unregister_ahash(&sha_v4_algs[j]);
1414
1415err_sha_v3_algs:
1416 for (j = 0; j < k; j++)
 1417 crypto_unregister_ahash(&sha_v3_algs[j]);
1418
841err_aes_algs: 1419err_aes_algs:
842 for (j = 0; j < i; j++) 1420 for (j = 0; j < i; j++)
843 crypto_unregister_alg(&aes_algs[j]); 1421 crypto_unregister_alg(&aes_algs[j]);
@@ -847,10 +1425,17 @@ err_aes_algs:
847 1425
848static void sahara_unregister_algs(struct sahara_dev *dev) 1426static void sahara_unregister_algs(struct sahara_dev *dev)
849{ 1427{
850 int i; 1428 unsigned int i;
851 1429
852 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) 1430 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
853 crypto_unregister_alg(&aes_algs[i]); 1431 crypto_unregister_alg(&aes_algs[i]);
1432
 1433 for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1434 crypto_unregister_ahash(&sha_v3_algs[i]);
1435
1436 if (dev->version > SAHARA_VERSION_3)
1437 for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1438 crypto_unregister_ahash(&sha_v4_algs[i]);
854} 1439}
855 1440
856static struct platform_device_id sahara_platform_ids[] = { 1441static struct platform_device_id sahara_platform_ids[] = {
@@ -860,6 +1445,7 @@ static struct platform_device_id sahara_platform_ids[] = {
860MODULE_DEVICE_TABLE(platform, sahara_platform_ids); 1445MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
861 1446
862static struct of_device_id sahara_dt_ids[] = { 1447static struct of_device_id sahara_dt_ids[] = {
1448 { .compatible = "fsl,imx53-sahara" },
863 { .compatible = "fsl,imx27-sahara" }, 1449 { .compatible = "fsl,imx27-sahara" },
864 { /* sentinel */ } 1450 { /* sentinel */ }
865}; 1451};
@@ -939,6 +1525,16 @@ static int sahara_probe(struct platform_device *pdev)
939 dev->iv_base = dev->key_base + AES_KEYSIZE_128; 1525 dev->iv_base = dev->key_base + AES_KEYSIZE_128;
940 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128; 1526 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
941 1527
1528 /* Allocate space for context: largest digest + message length field */
1529 dev->context_base = dma_alloc_coherent(&pdev->dev,
1530 SHA256_DIGEST_SIZE + 4,
1531 &dev->context_phys_base, GFP_KERNEL);
1532 if (!dev->context_base) {
1533 dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1534 err = -ENOMEM;
1535 goto err_key;
1536 }
1537
942 /* Allocate space for HW links */ 1538 /* Allocate space for HW links */
943 dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, 1539 dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
944 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), 1540 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
@@ -956,28 +1552,40 @@ static int sahara_probe(struct platform_device *pdev)
956 1552
957 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); 1553 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
958 1554
1555 spin_lock_init(&dev->lock);
1556 mutex_init(&dev->queue_mutex);
1557
959 dev_ptr = dev; 1558 dev_ptr = dev;
960 1559
961 tasklet_init(&dev->queue_task, sahara_aes_queue_task, 1560 dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
962 (unsigned long)dev); 1561 if (IS_ERR(dev->kthread)) {
963 tasklet_init(&dev->done_task, sahara_aes_done_task, 1562 err = PTR_ERR(dev->kthread);
964 (unsigned long)dev); 1563 goto err_link;
1564 }
965 1565
966 init_timer(&dev->watchdog); 1566 init_completion(&dev->dma_completion);
967 dev->watchdog.function = &sahara_watchdog;
968 dev->watchdog.data = (unsigned long)dev;
969 1567
970 clk_prepare_enable(dev->clk_ipg); 1568 clk_prepare_enable(dev->clk_ipg);
971 clk_prepare_enable(dev->clk_ahb); 1569 clk_prepare_enable(dev->clk_ahb);
972 1570
973 version = sahara_read(dev, SAHARA_REG_VERSION); 1571 version = sahara_read(dev, SAHARA_REG_VERSION);
974 if (version != SAHARA_VERSION_3) { 1572 if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1573 if (version != SAHARA_VERSION_3)
1574 err = -ENODEV;
1575 } else if (of_device_is_compatible(pdev->dev.of_node,
1576 "fsl,imx53-sahara")) {
1577 if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1578 err = -ENODEV;
1579 version = (version >> 8) & 0xff;
1580 }
1581 if (err == -ENODEV) {
975 dev_err(&pdev->dev, "SAHARA version %d not supported\n", 1582 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
976 version); 1583 version);
977 err = -ENODEV;
978 goto err_algs; 1584 goto err_algs;
979 } 1585 }
980 1586
1587 dev->version = version;
1588
981 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH, 1589 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
982 SAHARA_REG_CMD); 1590 SAHARA_REG_CMD);
983 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) | 1591 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
@@ -1000,11 +1608,15 @@ err_algs:
1000 dev->hw_link[0], dev->hw_phys_link[0]); 1608 dev->hw_link[0], dev->hw_phys_link[0]);
1001 clk_disable_unprepare(dev->clk_ipg); 1609 clk_disable_unprepare(dev->clk_ipg);
1002 clk_disable_unprepare(dev->clk_ahb); 1610 clk_disable_unprepare(dev->clk_ahb);
1611 kthread_stop(dev->kthread);
1003 dev_ptr = NULL; 1612 dev_ptr = NULL;
1004err_link: 1613err_link:
1005 dma_free_coherent(&pdev->dev, 1614 dma_free_coherent(&pdev->dev,
1006 2 * AES_KEYSIZE_128, 1615 2 * AES_KEYSIZE_128,
1007 dev->key_base, dev->key_phys_base); 1616 dev->key_base, dev->key_phys_base);
1617 dma_free_coherent(&pdev->dev,
1618 SHA256_DIGEST_SIZE,
1619 dev->context_base, dev->context_phys_base);
1008err_key: 1620err_key:
1009 dma_free_coherent(&pdev->dev, 1621 dma_free_coherent(&pdev->dev,
1010 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), 1622 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
@@ -1027,8 +1639,7 @@ static int sahara_remove(struct platform_device *pdev)
1027 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), 1639 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1028 dev->hw_desc[0], dev->hw_phys_desc[0]); 1640 dev->hw_desc[0], dev->hw_phys_desc[0]);
1029 1641
1030 tasklet_kill(&dev->done_task); 1642 kthread_stop(dev->kthread);
1031 tasklet_kill(&dev->queue_task);
1032 1643
1033 sahara_unregister_algs(dev); 1644 sahara_unregister_algs(dev);
1034 1645
@@ -1055,4 +1666,5 @@ module_platform_driver(sahara_driver);
1055 1666
1056MODULE_LICENSE("GPL"); 1667MODULE_LICENSE("GPL");
1057MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>"); 1668MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1669MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1058MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator"); 1670MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 92105f3dc8e0..7c035de9055e 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1688,6 +1688,7 @@ static void ux500_cryp_shutdown(struct platform_device *pdev)
1688 1688
1689} 1689}
1690 1690
1691#ifdef CONFIG_PM_SLEEP
1691static int ux500_cryp_suspend(struct device *dev) 1692static int ux500_cryp_suspend(struct device *dev)
1692{ 1693{
1693 int ret; 1694 int ret;
@@ -1768,6 +1769,7 @@ static int ux500_cryp_resume(struct device *dev)
1768 1769
1769 return ret; 1770 return ret;
1770} 1771}
1772#endif
1771 1773
1772static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); 1774static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
1773 1775
@@ -1810,7 +1812,7 @@ module_exit(ux500_cryp_mod_fini);
1810module_param(cryp_mode, int, 0); 1812module_param(cryp_mode, int, 0);
1811 1813
1812MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine."); 1814MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
1813MODULE_ALIAS("aes-all"); 1815MODULE_ALIAS_CRYPTO("aes-all");
1814MODULE_ALIAS("des-all"); 1816MODULE_ALIAS_CRYPTO("des-all");
1815 1817
1816MODULE_LICENSE("GPL"); 1818MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 1c73f4fbc252..76ecc8d143d0 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1881,6 +1881,7 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
1881 __func__); 1881 __func__);
1882} 1882}
1883 1883
1884#ifdef CONFIG_PM_SLEEP
1884/** 1885/**
1885 * ux500_hash_suspend - Function that suspends the hash device. 1886 * ux500_hash_suspend - Function that suspends the hash device.
1886 * @dev: Device to suspend. 1887 * @dev: Device to suspend.
@@ -1949,6 +1950,7 @@ static int ux500_hash_resume(struct device *dev)
1949 1950
1950 return ret; 1951 return ret;
1951} 1952}
1953#endif
1952 1954
1953static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); 1955static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
1954 1956
@@ -1995,7 +1997,7 @@ module_exit(ux500_hash_mod_fini);
1995MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine."); 1997MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
1996MODULE_LICENSE("GPL"); 1998MODULE_LICENSE("GPL");
1997 1999
1998MODULE_ALIAS("sha1-all"); 2000MODULE_ALIAS_CRYPTO("sha1-all");
1999MODULE_ALIAS("sha256-all"); 2001MODULE_ALIAS_CRYPTO("sha256-all");
2000MODULE_ALIAS("hmac-sha1-all"); 2002MODULE_ALIAS_CRYPTO("hmac-sha1-all");
2001MODULE_ALIAS("hmac-sha256-all"); 2003MODULE_ALIAS_CRYPTO("hmac-sha256-all");
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 99485415dcc2..91e97ec01418 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -44,6 +44,7 @@
44#include <linux/hrtimer.h> 44#include <linux/hrtimer.h>
45#include <linux/ktime.h> 45#include <linux/ktime.h>
46#include <asm/facility.h> 46#include <asm/facility.h>
47#include <linux/crypto.h>
47 48
48#include "ap_bus.h" 49#include "ap_bus.h"
49 50
@@ -71,7 +72,7 @@ MODULE_AUTHOR("IBM Corporation");
71MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \ 72MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
72 "Copyright IBM Corp. 2006, 2012"); 73 "Copyright IBM Corp. 2006, 2012");
73MODULE_LICENSE("GPL"); 74MODULE_LICENSE("GPL");
74MODULE_ALIAS("z90crypt"); 75MODULE_ALIAS_CRYPTO("z90crypt");
75 76
76/* 77/*
77 * Module parameter 78 * Module parameter
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 74b13ec1ebd4..98abda9ed3aa 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -17,6 +17,32 @@
17 17
18struct crypto_ahash; 18struct crypto_ahash;
19 19
20/**
21 * DOC: Message Digest Algorithm Definitions
22 *
23 * These data structures define modular message digest algorithm
24 * implementations, managed via crypto_register_ahash(),
25 * crypto_register_shash(), crypto_unregister_ahash() and
26 * crypto_unregister_shash().
27 */
28
29/**
30 * struct hash_alg_common - define properties of message digest
31 * @digestsize: Size of the result of the transformation. A buffer of this size
32 * must be available to the @final and @finup calls, so they can
33 * store the resulting hash into it. For various predefined sizes,
34 * search include/crypto/ using
35 * git grep _DIGEST_SIZE include/crypto.
36 * @statesize: Size of the block for partial state of the transformation. A
37 * buffer of this size must be passed to the @export function as it
38 * will save the partial state of the transformation into it. On the
39 * other side, the @import function will load the state from a
40 * buffer of this size as well.
41 * @base: Start of data structure of cipher algorithm. The common data
42 * structure of crypto_alg contains information common to all ciphers.
43 * The hash_alg_common data structure now adds the hash-specific
44 * information.
45 */
20struct hash_alg_common { 46struct hash_alg_common {
21 unsigned int digestsize; 47 unsigned int digestsize;
22 unsigned int statesize; 48 unsigned int statesize;
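The registration interface named above (crypto_register_ahash() and friends) is normally driven from a loop over a driver's algorithm table, unwinding on failure. A minimal sketch, assuming a hypothetical example_algs[] table that is populated elsewhere (none of these names come from this patch):

/*
 * Hypothetical sketch only: register a driver's ahash table and unwind
 * the already-registered entries if one registration fails.
 */
#include <crypto/internal/hash.h>
#include <linux/kernel.h>

static struct ahash_alg example_algs[2];	/* assumed populated elsewhere */

static int example_register_hashes(void)
{
	int i, j, err;

	for (i = 0; i < ARRAY_SIZE(example_algs); i++) {
		err = crypto_register_ahash(&example_algs[i]);
		if (err)
			goto err_unregister;
	}
	return 0;

err_unregister:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&example_algs[j]);
	return err;
}

static void example_unregister_hashes(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_algs); i++)
		crypto_unregister_ahash(&example_algs[i]);
}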
@@ -37,6 +63,63 @@ struct ahash_request {
37 void *__ctx[] CRYPTO_MINALIGN_ATTR; 63 void *__ctx[] CRYPTO_MINALIGN_ATTR;
38}; 64};
39 65
66/**
67 * struct ahash_alg - asynchronous message digest definition
68 * @init: Initialize the transformation context. Intended only to initialize the
69 * state of the HASH transformation at the begining. This shall fill in
70 * the internal structures used during the entire duration of the whole
71 * transformation. No data processing happens at this point.
72 * @update: Push a chunk of data into the driver for transformation. This
73 * function actually pushes blocks of data from upper layers into the
74 * driver, which then passes those to the hardware as seen fit. This
75 * function must not finalize the HASH transformation by calculating the
76 * final message digest as this only adds more data into the
77 * transformation. This function shall not modify the transformation
78 * context, as this function may be called in parallel with the same
79 * transformation object. Data processing can happen synchronously
80 * [SHASH] or asynchronously [AHASH] at this point.
81 * @final: Retrieve result from the driver. This function finalizes the
82 * transformation and retrieves the resulting hash from the driver and
83 * pushes it back to upper layers. No data processing happens at this
84 * point.
85 * @finup: Combination of @update and @final. This function is effectively a
86 * combination of @update and @final calls issued in sequence. As some
87 * hardware cannot do @update and @final separately, this callback was
88 * added to allow such hardware to be used at least by IPsec. Data
89 * processing can happen synchronously [SHASH] or asynchronously [AHASH]
90 * at this point.
91 * @digest: Combination of @init and @update and @final. This function
92 * effectively behaves as the entire chain of operations, @init,
93 * @update and @final issued in sequence. Just like @finup, this was
94 * added for hardware which cannot do even the @finup, but can only do
95 * the whole transformation in one run. Data processing can happen
96 * synchronously [SHASH] or asynchronously [AHASH] at this point.
97 * @setkey: Set optional key used by the hashing algorithm. Intended to push
98 * optional key used by the hashing algorithm from upper layers into
99 * the driver. This function can store the key in the transformation
100 * context or can outright program it into the hardware. In the former
101 * case, one must be careful to program the key into the hardware at
102 * appropriate time and one must be careful that .setkey() can be
103 * called multiple times during the existence of the transformation
 104 * object. Not all hashing algorithms implement this function, as it
105 * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
106 * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
107 * this function. This function must be called before any other of the
108 * @init, @update, @final, @finup, @digest is called. No data
109 * processing happens at this point.
110 * @export: Export partial state of the transformation. This function dumps the
111 * entire state of the ongoing transformation into a provided block of
112 * data so it can be @import 'ed back later on. This is useful in case
113 * you want to save partial result of the transformation after
114 * processing certain amount of data and reload this partial result
115 * multiple times later on for multiple re-use. No data processing
116 * happens at this point.
117 * @import: Import partial state of the transformation. This function loads the
118 * entire state of the ongoing transformation from a provided block of
119 * data so the transformation can continue from this point onward. No
120 * data processing happens at this point.
121 * @halg: see struct hash_alg_common
122 */
40struct ahash_alg { 123struct ahash_alg {
41 int (*init)(struct ahash_request *req); 124 int (*init)(struct ahash_request *req);
42 int (*update)(struct ahash_request *req); 125 int (*update)(struct ahash_request *req);
@@ -63,6 +146,23 @@ struct shash_desc {
63 crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \ 146 crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
64 struct shash_desc *shash = (struct shash_desc *)__##shash##_desc 147 struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
65 148
149/**
150 * struct shash_alg - synchronous message digest definition
151 * @init: see struct ahash_alg
152 * @update: see struct ahash_alg
153 * @final: see struct ahash_alg
154 * @finup: see struct ahash_alg
155 * @digest: see struct ahash_alg
156 * @export: see struct ahash_alg
157 * @import: see struct ahash_alg
158 * @setkey: see struct ahash_alg
159 * @digestsize: see struct ahash_alg
160 * @statesize: see struct ahash_alg
161 * @descsize: Size of the operational state for the message digest. This state
162 * size is the memory size that needs to be allocated for
163 * shash_desc.__ctx
164 * @base: internally used
165 */
66struct shash_alg { 166struct shash_alg {
67 int (*init)(struct shash_desc *desc); 167 int (*init)(struct shash_desc *desc);
68 int (*update)(struct shash_desc *desc, const u8 *data, 168 int (*update)(struct shash_desc *desc, const u8 *data,
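As a rough illustration of how the callbacks and size fields described above fit together, here is a minimal, hypothetical shash implementation (a one-byte XOR checksum; the "xor8" name, context struct and priority are invented for the sketch and are not part of this patch):

#include <crypto/internal/hash.h>
#include <linux/module.h>

struct xor8_ctx {
	u8 sum;		/* running one-byte XOR of all input */
};

static int xor8_init(struct shash_desc *desc)
{
	struct xor8_ctx *ctx = shash_desc_ctx(desc);

	ctx->sum = 0;
	return 0;
}

static int xor8_update(struct shash_desc *desc, const u8 *data,
		       unsigned int len)
{
	struct xor8_ctx *ctx = shash_desc_ctx(desc);

	while (len--)
		ctx->sum ^= *data++;
	return 0;
}

static int xor8_final(struct shash_desc *desc, u8 *out)
{
	struct xor8_ctx *ctx = shash_desc_ctx(desc);

	*out = ctx->sum;
	return 0;
}

static struct shash_alg xor8_alg = {
	.init		= xor8_init,
	.update		= xor8_update,
	.final		= xor8_final,
	.digestsize	= 1,
	.descsize	= sizeof(struct xor8_ctx),
	.base		= {
		.cra_name	 = "xor8",
		.cra_driver_name = "xor8-generic",
		.cra_priority	 = 100,
		.cra_blocksize	 = 1,
		.cra_module	 = THIS_MODULE,
	},
};

/* registered with crypto_register_shash(&xor8_alg) in module init */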
@@ -107,11 +207,35 @@ struct crypto_shash {
107 struct crypto_tfm base; 207 struct crypto_tfm base;
108}; 208};
109 209
210/**
211 * DOC: Asynchronous Message Digest API
212 *
213 * The asynchronous message digest API is used with the ciphers of type
 214 * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto).
215 *
216 * The asynchronous cipher operation discussion provided for the
217 * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
218 */
219
110static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) 220static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
111{ 221{
112 return container_of(tfm, struct crypto_ahash, base); 222 return container_of(tfm, struct crypto_ahash, base);
113} 223}
114 224
225/**
226 * crypto_alloc_ahash() - allocate ahash cipher handle
227 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
228 * ahash cipher
229 * @type: specifies the type of the cipher
230 * @mask: specifies the mask for the cipher
231 *
232 * Allocate a cipher handle for an ahash. The returned struct
233 * crypto_ahash is the cipher handle that is required for any subsequent
234 * API invocation for that ahash.
235 *
236 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
237 * of an error, PTR_ERR() returns the error code.
238 */
115struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, 239struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
116 u32 mask); 240 u32 mask);
117 241
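A short usage sketch for the allocation call documented above; the "sha1" name is only an example, the helper is hypothetical, and the transform is released with crypto_free_ahash() once all requests are done:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>

static struct crypto_ahash *example_get_sha1(void)
{
	struct crypto_ahash *tfm;

	/* any registered ahash or shash name can be used here */
	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("could not allocate sha1 transform: %ld\n",
		       PTR_ERR(tfm));
		return NULL;
	}
	return tfm;
}

/* later: crypto_free_ahash(tfm); */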
@@ -120,6 +244,10 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
120 return &tfm->base; 244 return &tfm->base;
121} 245}
122 246
247/**
248 * crypto_free_ahash() - zeroize and free the ahash handle
249 * @tfm: cipher handle to be freed
250 */
123static inline void crypto_free_ahash(struct crypto_ahash *tfm) 251static inline void crypto_free_ahash(struct crypto_ahash *tfm)
124{ 252{
125 crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); 253 crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
@@ -143,6 +271,16 @@ static inline struct hash_alg_common *crypto_hash_alg_common(
143 return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); 271 return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
144} 272}
145 273
274/**
275 * crypto_ahash_digestsize() - obtain message digest size
276 * @tfm: cipher handle
277 *
278 * The size for the message digest created by the message digest cipher
279 * referenced with the cipher handle is returned.
280 *
281 *
282 * Return: message digest size of cipher
283 */
146static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) 284static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
147{ 285{
148 return crypto_hash_alg_common(tfm)->digestsize; 286 return crypto_hash_alg_common(tfm)->digestsize;
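Callers typically use the digest size to size the result buffer that is later handed to the request; a minimal sketch (the helper name and the kmalloc-based allocation are just one possible choice):

#include <crypto/hash.h>
#include <linux/slab.h>

/* Illustrative helper: allocate a result buffer of exactly digest size. */
static u8 *example_alloc_result(struct crypto_ahash *tfm)
{
	return kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
}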
@@ -168,12 +306,32 @@ static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
168 crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); 306 crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
169} 307}
170 308
309/**
310 * crypto_ahash_reqtfm() - obtain cipher handle from request
311 * @req: asynchronous request handle that contains the reference to the ahash
312 * cipher handle
313 *
314 * Return the ahash cipher handle that is registered with the asynchronous
315 * request handle ahash_request.
316 *
317 * Return: ahash cipher handle
318 */
171static inline struct crypto_ahash *crypto_ahash_reqtfm( 319static inline struct crypto_ahash *crypto_ahash_reqtfm(
172 struct ahash_request *req) 320 struct ahash_request *req)
173{ 321{
174 return __crypto_ahash_cast(req->base.tfm); 322 return __crypto_ahash_cast(req->base.tfm);
175} 323}
176 324
325/**
326 * crypto_ahash_reqsize() - obtain size of the request data structure
327 * @tfm: cipher handle
328 *
329 * Return the size of the ahash state size. With the crypto_ahash_export
330 * function, the caller can export the state into a buffer whose size is
331 * defined with this function.
332 *
333 * Return: size of the ahash state
334 */
177static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) 335static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
178{ 336{
179 return tfm->reqsize; 337 return tfm->reqsize;
@@ -184,38 +342,166 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
184 return req->__ctx; 342 return req->__ctx;
185} 343}
186 344
345/**
346 * crypto_ahash_setkey - set key for cipher handle
347 * @tfm: cipher handle
348 * @key: buffer holding the key
349 * @keylen: length of the key in bytes
350 *
351 * The caller provided key is set for the ahash cipher. The cipher
352 * handle must point to a keyed hash in order for this function to succeed.
353 *
354 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
355 */
187int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, 356int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
188 unsigned int keylen); 357 unsigned int keylen);
358
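For keyed hashes such as "hmac(sha256)", the key must be programmed before any init/update/final/digest call. A hedged sketch; the helper name and error handling are illustrative only:

#include <crypto/hash.h>
#include <linux/err.h>

static int example_setup_hmac_sha256(struct crypto_ahash **tfm_out,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_ahash *tfm;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err) {
		crypto_free_ahash(tfm);
		return err;
	}

	*tfm_out = tfm;
	return 0;
}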
359/**
360 * crypto_ahash_finup() - update and finalize message digest
361 * @req: reference to the ahash_request handle that holds all information
362 * needed to perform the cipher operation
363 *
364 * This function is a "short-hand" for the function calls of
365 * crypto_ahash_update and crypto_shash_final. The parameters have the same
366 * meaning as discussed for those separate functions.
367 *
368 * Return: 0 if the message digest creation was successful; < 0 if an error
369 * occurred
370 */
189int crypto_ahash_finup(struct ahash_request *req); 371int crypto_ahash_finup(struct ahash_request *req);
372
373/**
374 * crypto_ahash_final() - calculate message digest
375 * @req: reference to the ahash_request handle that holds all information
376 * needed to perform the cipher operation
377 *
378 * Finalize the message digest operation and create the message digest
379 * based on all data added to the cipher handle. The message digest is placed
380 * into the output buffer registered with the ahash_request handle.
381 *
382 * Return: 0 if the message digest creation was successful; < 0 if an error
383 * occurred
384 */
190int crypto_ahash_final(struct ahash_request *req); 385int crypto_ahash_final(struct ahash_request *req);
386
387/**
388 * crypto_ahash_digest() - calculate message digest for a buffer
389 * @req: reference to the ahash_request handle that holds all information
390 * needed to perform the cipher operation
391 *
392 * This function is a "short-hand" for the function calls of crypto_ahash_init,
393 * crypto_ahash_update and crypto_ahash_final. The parameters have the same
394 * meaning as discussed for those separate three functions.
395 *
396 * Return: 0 if the message digest creation was successful; < 0 if an error
397 * occurred
398 */
191int crypto_ahash_digest(struct ahash_request *req); 399int crypto_ahash_digest(struct ahash_request *req);
192 400
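Putting the pieces together, a plausible one-shot digest over a linear buffer could look like the sketch below. The completion wrapper mirrors the usual pattern for waiting on asynchronous requests; all names (example_wait, example_one_shot_digest, ...) are invented for illustration, and the input buffer should be linear, DMA-able memory (not on the stack) when a hardware driver backs the transform:

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_wait {
	struct completion done;
	int err;
};

static void example_hash_complete(struct crypto_async_request *req, int err)
{
	struct example_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;		/* backlogged request started, keep waiting */
	wait->err = err;
	complete(&wait->done);
}

static int example_one_shot_digest(struct crypto_ahash *tfm,
				   const void *buf, unsigned int len,
				   u8 *digest)
{
	struct example_wait wait;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	init_completion(&wait.done);
	wait.err = 0;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_hash_complete, &wait);
	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, digest, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait.done);
		err = wait.err;
	}

	ahash_request_free(req);
	return err;
}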
401/**
402 * crypto_ahash_export() - extract current message digest state
403 * @req: reference to the ahash_request handle whose state is exported
404 * @out: output buffer of sufficient size that can hold the hash state
405 *
406 * This function exports the hash state of the ahash_request handle into the
407 * caller-allocated output buffer out which must have sufficient size (e.g. by
408 * calling crypto_ahash_reqsize).
409 *
410 * Return: 0 if the export was successful; < 0 if an error occurred
411 */
193static inline int crypto_ahash_export(struct ahash_request *req, void *out) 412static inline int crypto_ahash_export(struct ahash_request *req, void *out)
194{ 413{
195 return crypto_ahash_reqtfm(req)->export(req, out); 414 return crypto_ahash_reqtfm(req)->export(req, out);
196} 415}
197 416
417/**
418 * crypto_ahash_import() - import message digest state
419 * @req: reference to ahash_request handle the state is imported into
420 * @in: buffer holding the state
421 *
422 * This function imports the hash state into the ahash_request handle from the
423 * input buffer. That buffer should have been generated with the
424 * crypto_ahash_export function.
425 *
426 * Return: 0 if the import was successful; < 0 if an error occurred
427 */
198static inline int crypto_ahash_import(struct ahash_request *req, const void *in) 428static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
199{ 429{
200 return crypto_ahash_reqtfm(req)->import(req, in); 430 return crypto_ahash_reqtfm(req)->import(req, in);
201} 431}
202 432
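A small sketch of saving and restoring partial state with the two calls above; the buffer is sized via crypto_ahash_reqsize() as suggested by the export documentation, and the helper names are hypothetical:

#include <crypto/hash.h>
#include <linux/slab.h>

/*
 * Save the partial state of an ongoing hash so a later request can
 * continue from it via crypto_ahash_import().
 */
static void *example_save_hash_state(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;

	state = kmalloc(crypto_ahash_reqsize(tfm), GFP_KERNEL);
	if (!state)
		return NULL;

	if (crypto_ahash_export(req, state)) {
		kfree(state);
		return NULL;
	}
	return state;
}

static int example_resume_hash(struct ahash_request *req, const void *state)
{
	return crypto_ahash_import(req, state);
}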
433/**
434 * crypto_ahash_init() - (re)initialize message digest handle
435 * @req: ahash_request handle that already is initialized with all necessary
436 * data using the ahash_request_* API functions
437 *
438 * The call (re-)initializes the message digest referenced by the ahash_request
439 * handle. Any potentially existing state created by previous operations is
440 * discarded.
441 *
442 * Return: 0 if the message digest initialization was successful; < 0 if an
443 * error occurred
444 */
203static inline int crypto_ahash_init(struct ahash_request *req) 445static inline int crypto_ahash_init(struct ahash_request *req)
204{ 446{
205 return crypto_ahash_reqtfm(req)->init(req); 447 return crypto_ahash_reqtfm(req)->init(req);
206} 448}
207 449
450/**
451 * crypto_ahash_update() - add data to message digest for processing
452 * @req: ahash_request handle that was previously initialized with the
453 * crypto_ahash_init call.
454 *
455 * Updates the message digest state of the &ahash_request handle. The input data
456 * is pointed to by the scatter/gather list registered in the &ahash_request
 457 * handle.
458 *
459 * Return: 0 if the message digest update was successful; < 0 if an error
460 * occurred
461 */
208static inline int crypto_ahash_update(struct ahash_request *req) 462static inline int crypto_ahash_update(struct ahash_request *req)
209{ 463{
210 return crypto_ahash_reqtfm(req)->update(req); 464 return crypto_ahash_reqtfm(req)->update(req);
211} 465}
212 466
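The incremental interface strings these calls together. The sketch below reuses the example_wait/example_hash_complete helpers from the one-shot sketch earlier to cope with -EINPROGRESS/-EBUSY returns, and hashes two scatterlists in sequence; all names are illustrative:

/* Helper reusing the example_wait wrapper from the one-shot sketch above. */
static int example_wait_async(struct example_wait *wait, int err)
{
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait->done);
		reinit_completion(&wait->done);
		err = wait->err;
	}
	return err;
}

static int example_hash_two_buffers(struct ahash_request *req,
				    struct example_wait *wait,
				    struct scatterlist *sg1, unsigned int len1,
				    struct scatterlist *sg2, unsigned int len2,
				    u8 *digest)
{
	int err;

	err = example_wait_async(wait, crypto_ahash_init(req));
	if (err)
		return err;

	/* first chunk: no result buffer needed for an update */
	ahash_request_set_crypt(req, sg1, NULL, len1);
	err = example_wait_async(wait, crypto_ahash_update(req));
	if (err)
		return err;

	/* last chunk: finup() consumes it and writes the digest */
	ahash_request_set_crypt(req, sg2, digest, len2);
	return example_wait_async(wait, crypto_ahash_finup(req));
}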
467/**
468 * DOC: Asynchronous Hash Request Handle
469 *
470 * The &ahash_request data structure contains all pointers to data
471 * required for the asynchronous cipher operation. This includes the cipher
472 * handle (which can be used by multiple &ahash_request instances), pointer
473 * to plaintext and the message digest output buffer, asynchronous callback
474 * function, etc. It acts as a handle to the ahash_request_* API calls in a
475 * similar way as ahash handle to the crypto_ahash_* API calls.
476 */
477
478/**
479 * ahash_request_set_tfm() - update cipher handle reference in request
480 * @req: request handle to be modified
481 * @tfm: cipher handle that shall be added to the request handle
482 *
483 * Allow the caller to replace the existing ahash handle in the request
484 * data structure with a different one.
485 */
213static inline void ahash_request_set_tfm(struct ahash_request *req, 486static inline void ahash_request_set_tfm(struct ahash_request *req,
214 struct crypto_ahash *tfm) 487 struct crypto_ahash *tfm)
215{ 488{
216 req->base.tfm = crypto_ahash_tfm(tfm); 489 req->base.tfm = crypto_ahash_tfm(tfm);
217} 490}
218 491
492/**
493 * ahash_request_alloc() - allocate request data structure
494 * @tfm: cipher handle to be registered with the request
495 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
496 *
497 * Allocate the request data structure that must be used with the ahash
498 * message digest API calls. During
499 * the allocation, the provided ahash handle
500 * is registered in the request data structure.
501 *
502 * Return: allocated request handle in case of success; IS_ERR() is true in case
503 * of an error, PTR_ERR() returns the error code.
504 */
219static inline struct ahash_request *ahash_request_alloc( 505static inline struct ahash_request *ahash_request_alloc(
220 struct crypto_ahash *tfm, gfp_t gfp) 506 struct crypto_ahash *tfm, gfp_t gfp)
221{ 507{
@@ -230,6 +516,10 @@ static inline struct ahash_request *ahash_request_alloc(
230 return req; 516 return req;
231} 517}
232 518
519/**
520 * ahash_request_free() - zeroize and free the request data structure
521 * @req: request data structure cipher handle to be freed
522 */
233static inline void ahash_request_free(struct ahash_request *req) 523static inline void ahash_request_free(struct ahash_request *req)
234{ 524{
235 kzfree(req); 525 kzfree(req);
@@ -241,6 +531,31 @@ static inline struct ahash_request *ahash_request_cast(
241 return container_of(req, struct ahash_request, base); 531 return container_of(req, struct ahash_request, base);
242} 532}
243 533
534/**
535 * ahash_request_set_callback() - set asynchronous callback function
536 * @req: request handle
537 * @flags: specify zero or an ORing of the flags
538 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
539 * increase the wait queue beyond the initial maximum size;
540 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
541 * @compl: callback function pointer to be registered with the request handle
542 * @data: The data pointer refers to memory that is not used by the kernel
543 * crypto API, but provided to the callback function for it to use. Here,
544 * the caller can provide a reference to memory the callback function can
545 * operate on. As the callback function is invoked asynchronously to the
546 * related functionality, it may need to access data structures of the
547 * related functionality which can be referenced using this pointer. The
548 * callback function can access the memory via the "data" field in the
549 * &crypto_async_request data structure provided to the callback function.
550 *
551 * This function allows setting the callback function that is triggered once
552 * the cipher operation completes.
553 *
554 * The callback function is registered with the &ahash_request handle and
555 * must comply with the following template
556 *
557 * void callback_function(struct crypto_async_request *req, int error)
558 */
244static inline void ahash_request_set_callback(struct ahash_request *req, 559static inline void ahash_request_set_callback(struct ahash_request *req,
245 u32 flags, 560 u32 flags,
246 crypto_completion_t compl, 561 crypto_completion_t compl,
@@ -251,6 +566,19 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
251 req->base.flags = flags; 566 req->base.flags = flags;
252} 567}
253 568
569/**
570 * ahash_request_set_crypt() - set data buffers
571 * @req: ahash_request handle to be updated
572 * @src: source scatter/gather list
573 * @result: buffer that is filled with the message digest -- the caller must
574 * ensure that the buffer has sufficient space by, for example, calling
575 * crypto_ahash_digestsize()
576 * @nbytes: number of bytes to process from the source scatter/gather list
577 *
578 * By using this call, the caller references the source scatter/gather list.
579 * The source scatter/gather list points to the data the message digest is to
580 * be calculated for.
581 */
254static inline void ahash_request_set_crypt(struct ahash_request *req, 582static inline void ahash_request_set_crypt(struct ahash_request *req,
255 struct scatterlist *src, u8 *result, 583 struct scatterlist *src, u8 *result,
256 unsigned int nbytes) 584 unsigned int nbytes)
@@ -260,6 +588,33 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
260 req->result = result; 588 req->result = result;
261} 589}
262 590
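Pulling the ahash request calls above together, the following is a minimal sketch of a one-shot digest over a linear buffer; it is illustrative only. "sha256" is an example algorithm name, the completion-based wait is the usual idiom rather than something mandated by the API, and the data buffer is assumed to be suitable for scatterlist use (i.e. not stack memory).

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct ahash_done {
	struct completion completion;
	int err;
};

/* Invoked by the crypto layer when the asynchronous request finishes. */
static void ahash_done_cb(struct crypto_async_request *req, int err)
{
	struct ahash_done *done = req->data;

	if (err == -EINPROGRESS)	/* request left the backlog, not finished yet */
		return;
	done->err = err;
	complete(&done->completion);
}

/* Sketch: one-shot "sha256" digest of a linear buffer via the ahash API. */
static int ahash_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct ahash_done done;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&done.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP, ahash_done_cb, &done);

	sg_init_one(&sg, data, len);
	/* out must provide crypto_ahash_digestsize(tfm) bytes */
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);		/* documented earlier in this header */
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done.completion);
		ret = done.err;
	}

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return ret;
}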
591/**
592 * DOC: Synchronous Message Digest API
593 *
594 * The synchronous message digest API is used with the ciphers of type
595 * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
596 *
597 * The message digest API is able to maintain state information for the
598 * caller.
599 *
600 * The synchronous message digest API can store user-related context in its
601 * shash_desc request data structure.
602 */
603
604/**
605 * crypto_alloc_shash() - allocate message digest handle
606 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
607 * message digest cipher
608 * @type: specifies the type of the cipher
609 * @mask: specifies the mask for the cipher
610 *
611 * Allocate a cipher handle for a message digest. The returned &struct
612 * crypto_shash is the cipher handle that is required for any subsequent
613 * API invocation for that message digest.
614 *
615 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
616 * of an error, PTR_ERR() returns the error code.
617 */
263struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, 618struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
264 u32 mask); 619 u32 mask);
265 620
@@ -268,6 +623,10 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
268 return &tfm->base; 623 return &tfm->base;
269} 624}
270 625
626/**
627 * crypto_free_shash() - zeroize and free the message digest handle
628 * @tfm: cipher handle to be freed
629 */
271static inline void crypto_free_shash(struct crypto_shash *tfm) 630static inline void crypto_free_shash(struct crypto_shash *tfm)
272{ 631{
273 crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); 632 crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
@@ -279,6 +638,15 @@ static inline unsigned int crypto_shash_alignmask(
279 return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); 638 return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
280} 639}
281 640
641/**
642 * crypto_shash_blocksize() - obtain block size for cipher
643 * @tfm: cipher handle
644 *
645 * The block size for the message digest cipher referenced with the cipher
646 * handle is returned.
647 *
648 * Return: block size of cipher
649 */
282static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) 650static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
283{ 651{
284 return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); 652 return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
@@ -294,6 +662,15 @@ static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
294 return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); 662 return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
295} 663}
296 664
665/**
666 * crypto_shash_digestsize() - obtain message digest size
667 * @tfm: cipher handle
668 *
669 * The size for the message digest created by the message digest cipher
670 * referenced with the cipher handle is returned.
671 *
672 * Return: digest size of cipher
673 */
297static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) 674static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
298{ 675{
299 return crypto_shash_alg(tfm)->digestsize; 676 return crypto_shash_alg(tfm)->digestsize;
@@ -319,6 +696,21 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
319 crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); 696 crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
320} 697}
321 698
699/**
700 * crypto_shash_descsize() - obtain the operational state size
701 * @tfm: cipher handle
702 *
703 * The size of the operational state the cipher needs during operation is
704 * returned for the hash referenced with the cipher handle. This size is
705 * required to calculate the memory requirements to allow the caller allocating
706 * sufficient memory for operational state.
707 *
708 * The operational state is defined with struct shash_desc where the size of
709 * that data structure is to be calculated as
710 * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
711 *
712 * Return: size of the operational state
713 */
322static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) 714static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
323{ 715{
324 return tfm->descsize; 716 return tfm->descsize;
@@ -329,29 +721,129 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
329 return desc->__ctx; 721 return desc->__ctx;
330} 722}
331 723
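The descsize rule quoted above translates directly into an allocation helper; the following is a minimal sketch (not part of the patch) showing the sizeof(struct shash_desc) + crypto_shash_descsize() calculation in code.

#include <crypto/hash.h>
#include <linux/slab.h>

/* Sketch: allocate the operational state for a given shash handle. */
static struct shash_desc *alloc_shash_desc(struct crypto_shash *tfm, gfp_t gfp)
{
	struct shash_desc *desc;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), gfp);
	if (!desc)
		return NULL;

	desc->tfm = tfm;
	desc->flags = 0;	/* may also carry CRYPTO_TFM_REQ_MAY_SLEEP */
	return desc;
}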
724/**
725 * crypto_shash_setkey() - set key for message digest
726 * @tfm: cipher handle
727 * @key: buffer holding the key
728 * @keylen: length of the key in bytes
729 *
730 * The caller provided key is set for the keyed message digest cipher. The
731 * cipher handle must point to a keyed message digest cipher in order for this
732 * function to succeed.
733 *
734 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
735 */
332int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, 736int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
333 unsigned int keylen); 737 unsigned int keylen);
738
739/**
740 * crypto_shash_digest() - calculate message digest for buffer
741 * @desc: see crypto_shash_final()
742 * @data: see crypto_shash_update()
743 * @len: see crypto_shash_update()
744 * @out: see crypto_shash_final()
745 *
746 * This function is a "short-hand" for the function calls of crypto_shash_init,
747 * crypto_shash_update and crypto_shash_final. The parameters have the same
748 * meaning as discussed for those separate three functions.
749 *
750 * Return: 0 if the message digest creation was successful; < 0 if an error
751 * occurred
752 */
334int crypto_shash_digest(struct shash_desc *desc, const u8 *data, 753int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
335 unsigned int len, u8 *out); 754 unsigned int len, u8 *out);
336 755
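As an illustration of the setkey/digest pair documented above, here is a minimal sketch (not part of the patch) of a one-shot keyed digest; "hmac(sha256)" is an example cipher string and the descriptor is sized per the crypto_shash_descsize() rule shown earlier.

#include <crypto/hash.h>
#include <linux/slab.h>

/* Sketch: compute hmac(sha256) over a linear buffer in one call. */
static int hmac_sha256_oneshot(const u8 *key, unsigned int keylen,
			       const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_tfm;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	/* init + update + final in a single call; out must provide
	 * crypto_shash_digestsize(tfm) bytes */
	ret = crypto_shash_digest(desc, data, len, out);

	kzfree(desc);
out_tfm:
	crypto_free_shash(tfm);
	return ret;
}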
756/**
757 * crypto_shash_export() - extract operational state for message digest
758 * @desc: reference to the operational state handle whose state is exported
759 * @out: output buffer of sufficient size that can hold the hash state
760 *
761 * This function exports the hash state of the operational state handle into the
762 * caller-allocated output buffer out, which must have sufficient size (e.g. as
 763 * determined by calling crypto_shash_descsize).
764 *
765 * Return: 0 if the export creation was successful; < 0 if an error occurred
766 */
337static inline int crypto_shash_export(struct shash_desc *desc, void *out) 767static inline int crypto_shash_export(struct shash_desc *desc, void *out)
338{ 768{
339 return crypto_shash_alg(desc->tfm)->export(desc, out); 769 return crypto_shash_alg(desc->tfm)->export(desc, out);
340} 770}
341 771
772/**
773 * crypto_shash_import() - import operational state
774 * @desc: reference to the operational state handle the state is imported into
775 * @in: buffer holding the state
776 *
777 * This function imports the hash state into the operational state handle from
778 * the input buffer. That buffer should have been generated with the
 779 * crypto_shash_export function.
780 *
781 * Return: 0 if the import was successful; < 0 if an error occurred
782 */
342static inline int crypto_shash_import(struct shash_desc *desc, const void *in) 783static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
343{ 784{
344 return crypto_shash_alg(desc->tfm)->import(desc, in); 785 return crypto_shash_alg(desc->tfm)->import(desc, in);
345} 786}
346 787
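A minimal sketch (not part of the patch) of the export/import pair above; the state buffer is sized with crypto_shash_descsize() following the sizing hint given in the export description.

#include <crypto/hash.h>
#include <linux/slab.h>

/* Sketch: snapshot a partially-filled shash_desc and resume it later. */
static int shash_checkpoint(struct shash_desc *desc)
{
	void *state;
	int ret;

	state = kmalloc(crypto_shash_descsize(desc->tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = crypto_shash_export(desc, state);		/* save current state */
	if (!ret)
		ret = crypto_shash_import(desc, state);	/* ... and restore it */

	kzfree(state);
	return ret;
}

Exporting and immediately re-importing is only done to keep the example short; in practice the state is typically imported into a different descriptor at a later point.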
788/**
789 * crypto_shash_init() - (re)initialize message digest
790 * @desc: operational state handle that is already filled
791 *
792 * The call (re-)initializes the message digest referenced by the
793 * operational state handle. Any potentially existing state created by
794 * previous operations is discarded.
795 *
796 * Return: 0 if the message digest initialization was successful; < 0 if an
797 * error occurred
798 */
347static inline int crypto_shash_init(struct shash_desc *desc) 799static inline int crypto_shash_init(struct shash_desc *desc)
348{ 800{
349 return crypto_shash_alg(desc->tfm)->init(desc); 801 return crypto_shash_alg(desc->tfm)->init(desc);
350} 802}
351 803
804/**
805 * crypto_shash_update() - add data to message digest for processing
806 * @desc: operational state handle that is already initialized
807 * @data: input data to be added to the message digest
808 * @len: length of the input data
809 *
810 * Updates the message digest state of the operational state handle.
811 *
812 * Return: 0 if the message digest update was successful; < 0 if an error
813 * occurred
814 */
352int crypto_shash_update(struct shash_desc *desc, const u8 *data, 815int crypto_shash_update(struct shash_desc *desc, const u8 *data,
353 unsigned int len); 816 unsigned int len);
817
818/**
819 * crypto_shash_final() - calculate message digest
820 * @desc: operational state handle that is already filled with data
821 * @out: output buffer filled with the message digest
822 *
823 * Finalize the message digest operation and create the message digest
824 * based on all data added to the cipher handle. The message digest is placed
825 * into the output buffer. The caller must ensure that the output buffer is
826 * large enough by using crypto_shash_digestsize.
827 *
828 * Return: 0 if the message digest creation was successful; < 0 if an error
829 * occurred
830 */
354int crypto_shash_final(struct shash_desc *desc, u8 *out); 831int crypto_shash_final(struct shash_desc *desc, u8 *out);
832
833/**
834 * crypto_shash_finup() - calculate message digest of buffer
835 * @desc: see crypto_shash_final()
836 * @data: see crypto_shash_update()
837 * @len: see crypto_shash_update()
838 * @out: see crypto_shash_final()
839 *
840 * This function is a "short-hand" for the function calls of
841 * crypto_shash_update and crypto_shash_final. The parameters have the same
842 * meaning as discussed for those separate functions.
843 *
844 * Return: 0 if the message digest creation was successful; < 0 if an error
845 * occurred
846 */
355int crypto_shash_finup(struct shash_desc *desc, const u8 *data, 847int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
356 unsigned int len, u8 *out); 848 unsigned int len, u8 *out);
357 849
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index d61c11170213..cd62bf4289e9 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -42,6 +42,7 @@ struct af_alg_completion {
42struct af_alg_control { 42struct af_alg_control {
43 struct af_alg_iv *iv; 43 struct af_alg_iv *iv;
44 int op; 44 int op;
45 unsigned int aead_assoclen;
45}; 46};
46 47
47struct af_alg_type { 48struct af_alg_type {
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index c93f9b917925..a16fb10142bf 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -20,11 +20,38 @@ extern struct crypto_rng *crypto_default_rng;
20int crypto_get_default_rng(void); 20int crypto_get_default_rng(void);
21void crypto_put_default_rng(void); 21void crypto_put_default_rng(void);
22 22
23/**
24 * DOC: Random number generator API
25 *
26 * The random number generator API is used with the ciphers of type
27 * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
28 */
29
23static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) 30static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
24{ 31{
25 return (struct crypto_rng *)tfm; 32 return (struct crypto_rng *)tfm;
26} 33}
27 34
35/**
 36 * crypto_alloc_rng() - allocate RNG handle
37 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 38 * random number generator
39 * @type: specifies the type of the cipher
40 * @mask: specifies the mask for the cipher
41 *
42 * Allocate a cipher handle for a random number generator. The returned struct
43 * crypto_rng is the cipher handle that is required for any subsequent
44 * API invocation for that random number generator.
45 *
46 * For all random number generators, this call creates a new private copy of
47 * the random number generator that does not share a state with other
48 * instances. The only exception is the "krng" random number generator which
49 * is a kernel crypto API use case for the get_random_bytes() function of the
50 * /dev/random driver.
51 *
52 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
53 * of an error, PTR_ERR() returns the error code.
54 */
28static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name, 55static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
29 u32 type, u32 mask) 56 u32 type, u32 mask)
30{ 57{
@@ -40,6 +67,14 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
40 return &tfm->base; 67 return &tfm->base;
41} 68}
42 69
70/**
 71 * crypto_rng_alg() - obtain the rng_alg of an RNG
72 * @tfm: cipher handle
73 *
74 * Return the rng_alg structure of the initialized random number generator.
75 *
76 * Return: pointer to &struct rng_alg
77 */
43static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) 78static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
44{ 79{
45 return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng; 80 return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
@@ -50,23 +85,68 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
50 return &crypto_rng_tfm(tfm)->crt_rng; 85 return &crypto_rng_tfm(tfm)->crt_rng;
51} 86}
52 87
88/**
89 * crypto_free_rng() - zeroize and free RNG handle
90 * @tfm: cipher handle to be freed
91 */
53static inline void crypto_free_rng(struct crypto_rng *tfm) 92static inline void crypto_free_rng(struct crypto_rng *tfm)
54{ 93{
55 crypto_free_tfm(crypto_rng_tfm(tfm)); 94 crypto_free_tfm(crypto_rng_tfm(tfm));
56} 95}
57 96
97/**
98 * crypto_rng_get_bytes() - get random number
99 * @tfm: cipher handle
100 * @rdata: output buffer holding the random numbers
101 * @dlen: length of the output buffer
102 *
103 * This function fills the caller-allocated buffer with random numbers using the
104 * random number generator referenced by the cipher handle.
105 *
 106 * Return: > 0 if the function was successful, indicating the number of generated
107 * bytes; < 0 if an error occurred
108 */
58static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, 109static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
59 u8 *rdata, unsigned int dlen) 110 u8 *rdata, unsigned int dlen)
60{ 111{
61 return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen); 112 return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
62} 113}
63 114
115/**
116 * crypto_rng_reset() - re-initialize the RNG
117 * @tfm: cipher handle
118 * @seed: seed input data
119 * @slen: length of the seed input data
120 *
121 * The reset function completely re-initializes the random number generator
122 * referenced by the cipher handle by clearing the current state. The new state
123 * is initialized with the caller provided seed or automatically, depending
124 * on the random number generator type (the ANSI X9.31 RNG requires
125 * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
126 * The seed is provided as a parameter to this function call. The provided seed
127 * should have the length of the seed size defined for the random number
128 * generator as defined by crypto_rng_seedsize.
129 *
 130 * Return: 0 if the reset was successful; < 0 if an error occurred
131 */
64static inline int crypto_rng_reset(struct crypto_rng *tfm, 132static inline int crypto_rng_reset(struct crypto_rng *tfm,
65 u8 *seed, unsigned int slen) 133 u8 *seed, unsigned int slen)
66{ 134{
67 return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen); 135 return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
68} 136}
69 137
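Combining the RNG calls above, a minimal sketch (not part of the patch): "stdrng" is the customary generic name for the system default RNG, and passing a NULL seed relies on the automatic seeding described for the SP800-90A DRBGs; both are assumptions, not requirements of this API.

#include <crypto/rng.h>
#include <linux/err.h>

/* Sketch: obtain dlen random bytes from the default "stdrng" instance. */
static int get_rng_bytes(u8 *buf, unsigned int dlen)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_reset(rng, NULL, 0);	/* let the DRBG seed itself */
	if (ret < 0)
		goto out;

	ret = crypto_rng_get_bytes(rng, buf, dlen);
	if (ret > 0)		/* per the Return description above: byte count */
		ret = 0;
out:
	crypto_free_rng(rng);
	return ret;
}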
138/**
139 * crypto_rng_seedsize() - obtain seed size of RNG
140 * @tfm: cipher handle
141 *
142 * The function returns the seed size for the random number generator
143 * referenced by the cipher handle. This value may be zero if the random
144 * number generator does not implement or require a reseeding. For example,
145 * the SP800-90A DRBGs implement an automated reseeding after reaching a
146 * pre-defined threshold.
147 *
148 * Return: seed size for the random number generator
149 */
70static inline int crypto_rng_seedsize(struct crypto_rng *tfm) 150static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
71{ 151{
72 return crypto_rng_alg(tfm)->seedsize; 152 return crypto_rng_alg(tfm)->seedsize;
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index d45e949699ea..9c8776d0ada8 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -26,6 +26,19 @@
26#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27 27
28/* 28/*
29 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
30 * arbitrary modules to be loaded. Loading from userspace may still need the
31 * unprefixed names, so those aliases are retained as well.
32 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
33 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
34 * expands twice on the same line. Instead, use a separate base name for the
35 * alias.
36 */
37#define MODULE_ALIAS_CRYPTO(name) \
38 __MODULE_INFO(alias, alias_userspace, name); \
39 __MODULE_INFO(alias, alias_crypto, "crypto-" name)
40
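For a driver or algorithm module, the macro above is simply placed next to the existing MODULE_* tags; a minimal sketch with a placeholder algorithm name "foo":

/* Module advertising an algorithm named "foo" (placeholder). */
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("foo");
/*
 * Roughly equivalent to declaring both
 *   MODULE_ALIAS("foo")          - legacy name, still loadable from userspace
 *   MODULE_ALIAS("crypto-foo")   - prefixed name used by the crypto API's
 *                                  own request_module() calls
 */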
41/*
29 * Algorithm masks and types. 42 * Algorithm masks and types.
30 */ 43 */
31#define CRYPTO_ALG_TYPE_MASK 0x0000000f 44#define CRYPTO_ALG_TYPE_MASK 0x0000000f
@@ -127,6 +140,13 @@ struct skcipher_givcrypt_request;
127 140
128typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); 141typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
129 142
143/**
144 * DOC: Block Cipher Context Data Structures
145 *
146 * These data structures define the operating context for each block cipher
147 * type.
148 */
149
130struct crypto_async_request { 150struct crypto_async_request {
131 struct list_head list; 151 struct list_head list;
132 crypto_completion_t complete; 152 crypto_completion_t complete;
@@ -194,9 +214,63 @@ struct hash_desc {
194 u32 flags; 214 u32 flags;
195}; 215};
196 216
197/* 217/**
198 * Algorithms: modular crypto algorithm implementations, managed 218 * DOC: Block Cipher Algorithm Definitions
199 * via crypto_register_alg() and crypto_unregister_alg(). 219 *
220 * These data structures define modular crypto algorithm implementations,
221 * managed via crypto_register_alg() and crypto_unregister_alg().
222 */
223
224/**
225 * struct ablkcipher_alg - asynchronous block cipher definition
226 * @min_keysize: Minimum key size supported by the transformation. This is the
227 * smallest key length supported by this transformation algorithm.
228 * This must be set to one of the pre-defined values as this is
229 * not hardware specific. Possible values for this field can be
230 * found via git grep "_MIN_KEY_SIZE" include/crypto/
231 * @max_keysize: Maximum key size supported by the transformation. This is the
232 * largest key length supported by this transformation algorithm.
233 * This must be set to one of the pre-defined values as this is
234 * not hardware specific. Possible values for this field can be
235 * found via git grep "_MAX_KEY_SIZE" include/crypto/
236 * @setkey: Set key for the transformation. This function is used to either
237 * program a supplied key into the hardware or store the key in the
238 * transformation context for programming it later. Note that this
239 * function does modify the transformation context. This function can
240 * be called multiple times during the existence of the transformation
241 * object, so one must make sure the key is properly reprogrammed into
242 * the hardware. This function is also responsible for checking the key
243 * length for validity. In case a software fallback was put in place in
244 * the @cra_init call, this function might need to use the fallback if
245 * the algorithm doesn't support all of the key sizes.
246 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
247 * the supplied scatterlist containing the blocks of data. The crypto
248 * API consumer is responsible for aligning the entries of the
249 * scatterlist properly and making sure the chunks are correctly
250 * sized. In case a software fallback was put in place in the
251 * @cra_init call, this function might need to use the fallback if
252 * the algorithm doesn't support all of the key sizes. In case the
253 * key was stored in transformation context, the key might need to be
254 * re-programmed into the hardware in this function. This function
255 * shall not modify the transformation context, as this function may
256 * be called in parallel with the same transformation object.
257 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
258 * and the conditions are exactly the same.
259 * @givencrypt: Update the IV for encryption. With this function, a cipher
260 * implementation may provide a function that updates the IV
261 * for encryption.
262 * @givdecrypt: Update the IV for decryption. This is the reverse of
263 * @givencrypt .
264 * @geniv: The transformation implementation may use an "IV generator" provided
265 * by the kernel crypto API. Several use cases have a predefined
266 * approach how IVs are to be updated. For such use cases, the kernel
267 * crypto API provides ready-to-use implementations that can be
268 * referenced with this variable.
269 * @ivsize: IV size applicable for transformation. The consumer must provide an
270 * IV of exactly that size to perform the encrypt or decrypt operation.
271 *
272 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
273 * mandatory and must be filled.
200 */ 274 */
201struct ablkcipher_alg { 275struct ablkcipher_alg {
202 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, 276 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -213,6 +287,32 @@ struct ablkcipher_alg {
213 unsigned int ivsize; 287 unsigned int ivsize;
214}; 288};
215 289
290/**
291 * struct aead_alg - AEAD cipher definition
292 * @maxauthsize: Set the maximum authentication tag size supported by the
293 * transformation. A transformation may support smaller tag sizes.
294 * As the authentication tag is a message digest to ensure the
295 * integrity of the encrypted data, a consumer typically wants the
296 * largest authentication tag possible as defined by this
297 * variable.
298 * @setauthsize: Set authentication size for the AEAD transformation. This
299 * function is used to specify the consumer requested size of the
300 * authentication tag to be either generated by the transformation
301 * during encryption or the size of the authentication tag to be
302 * supplied during the decryption operation. This function is also
303 * responsible for checking the authentication tag size for
304 * validity.
305 * @setkey: see struct ablkcipher_alg
306 * @encrypt: see struct ablkcipher_alg
307 * @decrypt: see struct ablkcipher_alg
308 * @givencrypt: see struct ablkcipher_alg
309 * @givdecrypt: see struct ablkcipher_alg
310 * @geniv: see struct ablkcipher_alg
311 * @ivsize: see struct ablkcipher_alg
312 *
313 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
314 * mandatory and must be filled.
315 */
216struct aead_alg { 316struct aead_alg {
217 int (*setkey)(struct crypto_aead *tfm, const u8 *key, 317 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
218 unsigned int keylen); 318 unsigned int keylen);
@@ -228,6 +328,18 @@ struct aead_alg {
228 unsigned int maxauthsize; 328 unsigned int maxauthsize;
229}; 329};
230 330
331/**
332 * struct blkcipher_alg - synchronous block cipher definition
333 * @min_keysize: see struct ablkcipher_alg
334 * @max_keysize: see struct ablkcipher_alg
335 * @setkey: see struct ablkcipher_alg
336 * @encrypt: see struct ablkcipher_alg
337 * @decrypt: see struct ablkcipher_alg
338 * @geniv: see struct ablkcipher_alg
339 * @ivsize: see struct ablkcipher_alg
340 *
341 * All fields except @geniv and @ivsize are mandatory and must be filled.
342 */
231struct blkcipher_alg { 343struct blkcipher_alg {
232 int (*setkey)(struct crypto_tfm *tfm, const u8 *key, 344 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
233 unsigned int keylen); 345 unsigned int keylen);
@@ -245,6 +357,53 @@ struct blkcipher_alg {
245 unsigned int ivsize; 357 unsigned int ivsize;
246}; 358};
247 359
360/**
361 * struct cipher_alg - single-block symmetric ciphers definition
362 * @cia_min_keysize: Minimum key size supported by the transformation. This is
363 * the smallest key length supported by this transformation
364 * algorithm. This must be set to one of the pre-defined
365 * values as this is not hardware specific. Possible values
366 * for this field can be found via git grep "_MIN_KEY_SIZE"
367 * include/crypto/
368 * @cia_max_keysize: Maximum key size supported by the transformation. This is
369 * the largest key length supported by this transformation
370 * algorithm. This must be set to one of the pre-defined values
371 * as this is not hardware specific. Possible values for this
372 * field can be found via git grep "_MAX_KEY_SIZE"
373 * include/crypto/
374 * @cia_setkey: Set key for the transformation. This function is used to either
375 * program a supplied key into the hardware or store the key in the
376 * transformation context for programming it later. Note that this
377 * function does modify the transformation context. This function
378 * can be called multiple times during the existence of the
379 * transformation object, so one must make sure the key is properly
380 * reprogrammed into the hardware. This function is also
381 * responsible for checking the key length for validity.
382 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
383 * single block of data, which must be @cra_blocksize big. This
384 * always operates on a full @cra_blocksize and it is not possible
385 * to encrypt a block of smaller size. The supplied buffers must
386 * therefore also be at least of @cra_blocksize size. Both the
387 * input and output buffers are always aligned to @cra_alignmask.
388 * In case either of the input or output buffer supplied by user
389 * of the crypto API is not aligned to @cra_alignmask, the crypto
390 * API will re-align the buffers. The re-alignment means that a
391 * new buffer will be allocated, the data will be copied into the
392 * new buffer, then the processing will happen on the new buffer,
393 * then the data will be copied back into the original buffer and
394 * finally the new buffer will be freed. In case a software
395 * fallback was put in place in the @cra_init call, this function
396 * might need to use the fallback if the algorithm doesn't support
397 * all of the key sizes. In case the key was stored in
398 * transformation context, the key might need to be re-programmed
399 * into the hardware in this function. This function shall not
400 * modify the transformation context, as this function may be
401 * called in parallel with the same transformation object.
402 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
403 * @cia_encrypt, and the conditions are exactly the same.
404 *
405 * All fields are mandatory and must be filled.
406 */
248struct cipher_alg { 407struct cipher_alg {
249 unsigned int cia_min_keysize; 408 unsigned int cia_min_keysize;
250 unsigned int cia_max_keysize; 409 unsigned int cia_max_keysize;
@@ -261,6 +420,25 @@ struct compress_alg {
261 unsigned int slen, u8 *dst, unsigned int *dlen); 420 unsigned int slen, u8 *dst, unsigned int *dlen);
262}; 421};
263 422
423/**
424 * struct rng_alg - random number generator definition
425 * @rng_make_random: The function defined by this variable obtains a random
426 * number. The random number generator transform must generate
427 * the random number out of the context provided with this
428 * call.
 429 * @rng_reset: Reset the random number generator by clearing its entire state.
430 * With the invocation of this function call, the random number
431 * generator shall completely reinitialize its state. If the random
432 * number generator requires a seed for setting up a new state,
433 * the seed must be provided by the consumer while invoking this
434 * function. The required size of the seed is defined with
435 * @seedsize .
436 * @seedsize: The seed size required for a random number generator
437 * initialization defined with this variable. Some random number
438 * generators, like the SP800-90A DRBG, do not require a seed as the
439 * seeding is implemented internally without the need of support by
440 * the consumer. In this case, the seed size is set to zero.
441 */
264struct rng_alg { 442struct rng_alg {
265 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, 443 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
266 unsigned int dlen); 444 unsigned int dlen);
@@ -277,6 +455,81 @@ struct rng_alg {
277#define cra_compress cra_u.compress 455#define cra_compress cra_u.compress
278#define cra_rng cra_u.rng 456#define cra_rng cra_u.rng
279 457
458/**
 459 * struct crypto_alg - definition of a cryptographic cipher algorithm
460 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
461 * CRYPTO_ALG_* flags for the flags which go in here. Those are
462 * used for fine-tuning the description of the transformation
463 * algorithm.
464 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
465 * of the smallest possible unit which can be transformed with
466 * this algorithm. The users must respect this value.
467 * In case of HASH transformation, it is possible for a smaller
468 * block than @cra_blocksize to be passed to the crypto API for
469 * transformation; in case of any other transformation type, an
470 * error will be returned upon any attempt to transform smaller
471 * than @cra_blocksize chunks.
472 * @cra_ctxsize: Size of the operational context of the transformation. This
473 * value informs the kernel crypto API about the memory size
474 * needed to be allocated for the transformation context.
475 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
476 * buffer containing the input data for the algorithm must be
477 * aligned to this alignment mask. The data buffer for the
478 * output data must be aligned to this alignment mask. Note that
479 * the Crypto API will do the re-alignment in software, but
480 * only under special conditions and there is a performance hit.
481 * The re-alignment happens at these occasions for different
482 * @cra_u types: cipher -- For both input data and output data
483 * buffer; ahash -- For output hash destination buf; shash --
484 * For output hash destination buf.
485 * This is needed on hardware which is flawed by design and
486 * cannot pick data from arbitrary addresses.
487 * @cra_priority: Priority of this transformation implementation. In case
488 * multiple transformations with same @cra_name are available to
489 * the Crypto API, the kernel will use the one with highest
490 * @cra_priority.
491 * @cra_name: Generic name (usable by multiple implementations) of the
492 * transformation algorithm. This is the name of the transformation
493 * itself. This field is used by the kernel when looking up the
494 * providers of particular transformation.
495 * @cra_driver_name: Unique name of the transformation provider. This is the
496 * name of the provider of the transformation. This can be any
497 * arbitrary value, but in the usual case, this contains the
498 * name of the chip or provider and the name of the
499 * transformation algorithm.
500 * @cra_type: Type of the cryptographic transformation. This is a pointer to
501 * struct crypto_type, which implements callbacks common for all
 502 * transformation types. There are multiple options:
503 * &crypto_blkcipher_type, &crypto_ablkcipher_type,
504 * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
505 * This field might be empty. In that case, there are no common
506 * callbacks. This is the case for: cipher, compress, shash.
507 * @cra_u: Callbacks implementing the transformation. This is a union of
508 * multiple structures. Depending on the type of transformation selected
509 * by @cra_type and @cra_flags above, the associated structure must be
510 * filled with callbacks. This field might be empty. This is the case
511 * for ahash, shash.
512 * @cra_init: Initialize the cryptographic transformation object. This function
513 * is used to initialize the cryptographic transformation object.
514 * This function is called only once at the instantiation time, right
515 * after the transformation context was allocated. In case the
516 * cryptographic hardware has some special requirements which need to
517 * be handled by software, this function shall check for the precise
518 * requirement of the transformation and put any software fallbacks
519 * in place.
520 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
521 * counterpart to @cra_init, used to remove various changes set in
522 * @cra_init.
523 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
524 * @cra_list: internally used
525 * @cra_users: internally used
526 * @cra_refcnt: internally used
527 * @cra_destroy: internally used
528 *
529 * The struct crypto_alg describes a generic Crypto API algorithm and is common
530 * for all of the transformations. Any variable not documented here shall not
531 * be used by a cipher implementation as it is internal to the Crypto API.
532 */
280struct crypto_alg { 533struct crypto_alg {
281 struct list_head cra_list; 534 struct list_head cra_list;
282 struct list_head cra_users; 535 struct list_head cra_users;
@@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask)
581 return mask; 834 return mask;
582} 835}
583 836
837/**
838 * DOC: Asynchronous Block Cipher API
839 *
840 * Asynchronous block cipher API is used with the ciphers of type
841 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
842 *
843 * Asynchronous cipher operations imply that the function invocation for a
844 * cipher request returns immediately before the completion of the operation.
845 * The cipher request is scheduled as a separate kernel thread and therefore
846 * load-balanced on the different CPUs via the process scheduler. To allow
847 * the kernel crypto API to inform the caller about the completion of a cipher
848 * request, the caller must provide a callback function. That function is
849 * invoked with the cipher handle when the request completes.
850 *
 851 * To support the asynchronous operation, more information than just the
852 * cipher handle must be supplied to the kernel crypto API. That additional
853 * information is given by filling in the ablkcipher_request data structure.
854 *
855 * For the asynchronous block cipher API, the state is maintained with the tfm
856 * cipher handle. A single tfm can be used across multiple calls and in
857 * parallel. For asynchronous block cipher calls, context data supplied and
 858 * only used by the caller can be referenced in the request data structure in
859 * addition to the IV used for the cipher request. The maintenance of such
860 * state information would be important for a crypto driver implementer to
861 * have, because when calling the callback function upon completion of the
862 * cipher operation, that callback function may need some information about
863 * which operation just finished if it invoked multiple operations in parallel. This
864 * state information is unused by the kernel crypto API.
865 */
866
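The callback handshake described above boils down to a small amount of boilerplate; the following is a minimal sketch (not part of the patch) of the completion idiom that the later examples on this page reuse. The helper names are placeholders.

#include <linux/crypto.h>
#include <linux/completion.h>

struct async_result {
	struct completion completion;
	int err;
};

/* Callback registered with the request; runs when the operation completes. */
static void async_done(struct crypto_async_request *req, int err)
{
	struct async_result *res = req->data;

	if (err == -EINPROGRESS)	/* backlogged request was only queued */
		return;
	res->err = err;
	complete(&res->completion);
}

/* Wait for a request whose submission returned -EINPROGRESS or -EBUSY. */
static int wait_async(int ret, struct async_result *res)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res->completion);
		ret = res->err;
	}
	return ret;
}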
867/**
868 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
869 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
870 * ablkcipher cipher
871 * @type: specifies the type of the cipher
872 * @mask: specifies the mask for the cipher
873 *
874 * Allocate a cipher handle for an ablkcipher. The returned struct
875 * crypto_ablkcipher is the cipher handle that is required for any subsequent
876 * API invocation for that ablkcipher.
877 *
878 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
879 * of an error, PTR_ERR() returns the error code.
880 */
584struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, 881struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
585 u32 type, u32 mask); 882 u32 type, u32 mask);
586 883
@@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm(
590 return &tfm->base; 887 return &tfm->base;
591} 888}
592 889
890/**
891 * crypto_free_ablkcipher() - zeroize and free cipher handle
892 * @tfm: cipher handle to be freed
893 */
593static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) 894static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
594{ 895{
595 crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); 896 crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
596} 897}
597 898
899/**
900 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
901 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
902 * ablkcipher
903 * @type: specifies the type of the cipher
904 * @mask: specifies the mask for the cipher
905 *
906 * Return: true when the ablkcipher is known to the kernel crypto API; false
907 * otherwise
908 */
598static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, 909static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
599 u32 mask) 910 u32 mask)
600{ 911{
@@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
608 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; 919 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
609} 920}
610 921
922/**
923 * crypto_ablkcipher_ivsize() - obtain IV size
924 * @tfm: cipher handle
925 *
926 * The size of the IV for the ablkcipher referenced by the cipher handle is
927 * returned. This IV size may be zero if the cipher does not need an IV.
928 *
929 * Return: IV size in bytes
930 */
611static inline unsigned int crypto_ablkcipher_ivsize( 931static inline unsigned int crypto_ablkcipher_ivsize(
612 struct crypto_ablkcipher *tfm) 932 struct crypto_ablkcipher *tfm)
613{ 933{
614 return crypto_ablkcipher_crt(tfm)->ivsize; 934 return crypto_ablkcipher_crt(tfm)->ivsize;
615} 935}
616 936
937/**
938 * crypto_ablkcipher_blocksize() - obtain block size of cipher
939 * @tfm: cipher handle
940 *
941 * The block size for the ablkcipher referenced with the cipher handle is
942 * returned. The caller may use that information to allocate appropriate
943 * memory for the data returned by the encryption or decryption operation
944 *
945 * Return: block size of cipher
946 */
617static inline unsigned int crypto_ablkcipher_blocksize( 947static inline unsigned int crypto_ablkcipher_blocksize(
618 struct crypto_ablkcipher *tfm) 948 struct crypto_ablkcipher *tfm)
619{ 949{
@@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
643 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); 973 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
644} 974}
645 975
976/**
977 * crypto_ablkcipher_setkey() - set key for cipher
978 * @tfm: cipher handle
979 * @key: buffer holding the key
980 * @keylen: length of the key in bytes
981 *
982 * The caller provided key is set for the ablkcipher referenced by the cipher
983 * handle.
984 *
985 * Note, the key length determines the cipher type. Many block ciphers implement
986 * different cipher modes depending on the key size, such as AES-128 vs AES-192
987 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
988 * is performed.
989 *
990 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
991 */
646static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 992static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
647 const u8 *key, unsigned int keylen) 993 const u8 *key, unsigned int keylen)
648{ 994{
@@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
651 return crt->setkey(crt->base, key, keylen); 997 return crt->setkey(crt->base, key, keylen);
652} 998}
653 999
1000/**
1001 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
1002 * @req: ablkcipher_request out of which the cipher handle is to be obtained
1003 *
1004 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
1005 * data structure.
1006 *
1007 * Return: crypto_ablkcipher handle
1008 */
654static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( 1009static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
655 struct ablkcipher_request *req) 1010 struct ablkcipher_request *req)
656{ 1011{
657 return __crypto_ablkcipher_cast(req->base.tfm); 1012 return __crypto_ablkcipher_cast(req->base.tfm);
658} 1013}
659 1014
1015/**
1016 * crypto_ablkcipher_encrypt() - encrypt plaintext
1017 * @req: reference to the ablkcipher_request handle that holds all information
1018 * needed to perform the cipher operation
1019 *
1020 * Encrypt plaintext data using the ablkcipher_request handle. That data
1021 * structure and how it is filled with data is discussed with the
1022 * ablkcipher_request_* functions.
1023 *
1024 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1025 */
660static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) 1026static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
661{ 1027{
662 struct ablkcipher_tfm *crt = 1028 struct ablkcipher_tfm *crt =
@@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
664 return crt->encrypt(req); 1030 return crt->encrypt(req);
665} 1031}
666 1032
1033/**
1034 * crypto_ablkcipher_decrypt() - decrypt ciphertext
1035 * @req: reference to the ablkcipher_request handle that holds all information
1036 * needed to perform the cipher operation
1037 *
1038 * Decrypt ciphertext data using the ablkcipher_request handle. That data
1039 * structure and how it is filled with data is discussed with the
1040 * ablkcipher_request_* functions.
1041 *
1042 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1043 */
667static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) 1044static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
668{ 1045{
669 struct ablkcipher_tfm *crt = 1046 struct ablkcipher_tfm *crt =
@@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
671 return crt->decrypt(req); 1048 return crt->decrypt(req);
672} 1049}
673 1050
1051/**
1052 * DOC: Asynchronous Cipher Request Handle
1053 *
1054 * The ablkcipher_request data structure contains all pointers to data
1055 * required for the asynchronous cipher operation. This includes the cipher
1056 * handle (which can be used by multiple ablkcipher_request instances), pointer
1057 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
1058 * as a handle to the ablkcipher_request_* API calls in a similar way as
1059 * ablkcipher handle to the crypto_ablkcipher_* API calls.
1060 */
1061
1062/**
1063 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
1064 * @tfm: cipher handle
1065 *
1066 * Return: number of bytes
1067 */
674static inline unsigned int crypto_ablkcipher_reqsize( 1068static inline unsigned int crypto_ablkcipher_reqsize(
675 struct crypto_ablkcipher *tfm) 1069 struct crypto_ablkcipher *tfm)
676{ 1070{
677 return crypto_ablkcipher_crt(tfm)->reqsize; 1071 return crypto_ablkcipher_crt(tfm)->reqsize;
678} 1072}
679 1073
1074/**
1075 * ablkcipher_request_set_tfm() - update cipher handle reference in request
1076 * @req: request handle to be modified
1077 * @tfm: cipher handle that shall be added to the request handle
1078 *
1079 * Allow the caller to replace the existing ablkcipher handle in the request
1080 * data structure with a different one.
1081 */
680static inline void ablkcipher_request_set_tfm( 1082static inline void ablkcipher_request_set_tfm(
681 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) 1083 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
682{ 1084{
@@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
689 return container_of(req, struct ablkcipher_request, base); 1091 return container_of(req, struct ablkcipher_request, base);
690} 1092}
691 1093
1094/**
1095 * ablkcipher_request_alloc() - allocate request data structure
1096 * @tfm: cipher handle to be registered with the request
1097 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1098 *
1099 * Allocate the request data structure that must be used with the ablkcipher
1100 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
1101 * handle is registered in the request data structure.
1102 *
1103 * Return: allocated request handle in case of success; IS_ERR() is true in case
1104 * of an error, PTR_ERR() returns the error code.
1105 */
692static inline struct ablkcipher_request *ablkcipher_request_alloc( 1106static inline struct ablkcipher_request *ablkcipher_request_alloc(
693 struct crypto_ablkcipher *tfm, gfp_t gfp) 1107 struct crypto_ablkcipher *tfm, gfp_t gfp)
694{ 1108{
@@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
703 return req; 1117 return req;
704} 1118}
705 1119
1120/**
1121 * ablkcipher_request_free() - zeroize and free request data structure
1122 * @req: request data structure cipher handle to be freed
1123 */
706static inline void ablkcipher_request_free(struct ablkcipher_request *req) 1124static inline void ablkcipher_request_free(struct ablkcipher_request *req)
707{ 1125{
708 kzfree(req); 1126 kzfree(req);
709} 1127}
710 1128
1129/**
1130 * ablkcipher_request_set_callback() - set asynchronous callback function
1131 * @req: request handle
1132 * @flags: specify zero or an ORing of the flags
1133 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1134 * increase the wait queue beyond the initial maximum size;
1135 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1136 * @compl: callback function pointer to be registered with the request handle
1137 * @data: The data pointer refers to memory that is not used by the kernel
1138 * crypto API, but provided to the callback function for it to use. Here,
1139 * the caller can provide a reference to memory the callback function can
1140 * operate on. As the callback function is invoked asynchronously to the
1141 * related functionality, it may need to access data structures of the
1142 * related functionality which can be referenced using this pointer. The
1143 * callback function can access the memory via the "data" field in the
1144 * crypto_async_request data structure provided to the callback function.
1145 *
1146 * This function allows setting the callback function that is triggered once the
1147 * cipher operation completes.
1148 *
1149 * The callback function is registered with the ablkcipher_request handle and
1150 * must comply with the following template:
1151 *
1152 * void callback_function(struct crypto_async_request *req, int error)
1153 */
711static inline void ablkcipher_request_set_callback( 1154static inline void ablkcipher_request_set_callback(
712 struct ablkcipher_request *req, 1155 struct ablkcipher_request *req,
713 u32 flags, crypto_completion_t compl, void *data) 1156 u32 flags, crypto_completion_t compl, void *data)
@@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback(
717 req->base.flags = flags; 1160 req->base.flags = flags;
718} 1161}
719 1162
1163/**
1164 * ablkcipher_request_set_crypt() - set data buffers
1165 * @req: request handle
1166 * @src: source scatter / gather list
1167 * @dst: destination scatter / gather list
1168 * @nbytes: number of bytes to process from @src
1169 * @iv: IV for the cipher operation which must comply with the IV size defined
1170 * by crypto_ablkcipher_ivsize
1171 *
1172 * This function allows setting of the source data and destination data
1173 * scatter / gather lists.
1174 *
1175 * For encryption, the source is treated as the plaintext and the
1176 * destination is the ciphertext. For a decryption operation, the use is
1177 * reversed: the source is the ciphertext and the destination is the plaintext.
1178 */
720static inline void ablkcipher_request_set_crypt( 1179static inline void ablkcipher_request_set_crypt(
721 struct ablkcipher_request *req, 1180 struct ablkcipher_request *req,
722 struct scatterlist *src, struct scatterlist *dst, 1181 struct scatterlist *src, struct scatterlist *dst,
@@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt(
728 req->info = iv; 1187 req->info = iv;
729} 1188}
730 1189
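Tying the ablkcipher calls above together, a minimal sketch (not part of the patch) of an in-place encryption. "cbc(aes)" is an example cipher string, the buffer is assumed suitable for scatterlist use, len is assumed to be a multiple of the block size, iv must span crypto_ablkcipher_ivsize() bytes, and async_result/async_done/wait_async refer to the completion sketch shown after the Asynchronous Block Cipher API section.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

/* Sketch: encrypt buf in place with cbc(aes), waiting for completion. */
static int cbc_aes_encrypt(const u8 *key, unsigned int keylen,
			   u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct async_result res;	/* from the earlier completion sketch */
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);	/* 16 bytes -> AES-128 */
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP, async_done, &res);

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);	/* src == dst */

	ret = wait_async(crypto_ablkcipher_encrypt(req), &res);

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}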
1190/**
1191 * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
1192 *
1193 * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
1194 * (listed as type "aead" in /proc/crypto)
1195 *
 1196 * The most prominent examples of this type of encryption are GCM and CCM.
1197 * However, the kernel supports other types of AEAD ciphers which are defined
1198 * with the following cipher string:
1199 *
1200 * authenc(keyed message digest, block cipher)
1201 *
1202 * For example: authenc(hmac(sha256), cbc(aes))
1203 *
1204 * The example code provided for the asynchronous block cipher operation
1205 * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged
 1206 * for the *aead* pendants discussed in the following. In addition, for the AEAD
1207 * operation, the aead_request_set_assoc function must be used to set the
1208 * pointer to the associated data memory location before performing the
1209 * encryption or decryption operation. In case of an encryption, the associated
1210 * data memory is filled during the encryption operation. For decryption, the
1211 * associated data memory must contain data that is used to verify the integrity
1212 * of the decrypted data. Another deviation from the asynchronous block cipher
1213 * operation is that the caller should explicitly check for -EBADMSG returned
 1214 * by crypto_aead_decrypt(). That error indicates an authentication error, i.e.
1215 * a breach in the integrity of the message. In essence, that -EBADMSG error
1216 * code is the key bonus an AEAD cipher has over "standard" block chaining
1217 * modes.
1218 */
1219
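A minimal sketch (not part of the patch) of the AEAD-specific setup and the -EBADMSG check discussed above. The aead_request_* calls mirror their ablkcipher counterparts, as the text explains, and are only indicated in the comment; "gcm(aes)" and the 16-byte tag are example choices.

#include <linux/crypto.h>

/* Sketch: allocate a gcm(aes) handle and configure key and tag size. */
static int aead_setup(const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);	/* request a 16-byte tag */

	/*
	 * From here on, proceed as in the ablkcipher example, swapping in the
	 * aead_request_* pendants and additionally setting the associated
	 * data via aead_request_set_assoc(). After decryption:
	 *
	 *	ret = crypto_aead_decrypt(req);
	 *	if (ret == -EBADMSG)
	 *		;	// authentication failed: ciphertext or AAD was modified
	 */

	crypto_free_aead(tfm);
	return ret;
}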
731static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) 1220static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
732{ 1221{
733 return (struct crypto_aead *)tfm; 1222 return (struct crypto_aead *)tfm;
734} 1223}
735 1224
1225/**
1226 * crypto_alloc_aead() - allocate AEAD cipher handle
1227 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1228 * AEAD cipher
1229 * @type: specifies the type of the cipher
1230 * @mask: specifies the mask for the cipher
1231 *
1232 * Allocate a cipher handle for an AEAD. The returned struct
1233 * crypto_aead is the cipher handle that is required for any subsequent
1234 * API invocation for that AEAD.
1235 *
1236 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1237 * of an error, PTR_ERR() returns the error code.
1238 */
736struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); 1239struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
737 1240
738static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) 1241static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
@@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
740 return &tfm->base; 1243 return &tfm->base;
741} 1244}
742 1245
1246/**
1247 * crypto_free_aead() - zeroize and free aead handle
1248 * @tfm: cipher handle to be freed
1249 */
743static inline void crypto_free_aead(struct crypto_aead *tfm) 1250static inline void crypto_free_aead(struct crypto_aead *tfm)
744{ 1251{
745 crypto_free_tfm(crypto_aead_tfm(tfm)); 1252 crypto_free_tfm(crypto_aead_tfm(tfm));
@@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
750 return &crypto_aead_tfm(tfm)->crt_aead; 1257 return &crypto_aead_tfm(tfm)->crt_aead;
751} 1258}
752 1259
1260/**
1261 * crypto_aead_ivsize() - obtain IV size
1262 * @tfm: cipher handle
1263 *
1264 * The size of the IV for the aead referenced by the cipher handle is
1265 * returned. This IV size may be zero if the cipher does not need an IV.
1266 *
1267 * Return: IV size in bytes
1268 */
753static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) 1269static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
754{ 1270{
755 return crypto_aead_crt(tfm)->ivsize; 1271 return crypto_aead_crt(tfm)->ivsize;
756} 1272}
757 1273
1274/**
1275 * crypto_aead_authsize() - obtain maximum authentication data size
1276 * @tfm: cipher handle
1277 *
1278 * The maximum size of the authentication data for the AEAD cipher referenced
1279 * by the AEAD cipher handle is returned. The authentication data size may be
1280 * zero if the cipher implements a hard-coded maximum.
1281 *
1282 * The authentication data may also be known as "tag value".
1283 *
1284 * Return: authentication data size / tag size in bytes
1285 */
758static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) 1286static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
759{ 1287{
760 return crypto_aead_crt(tfm)->authsize; 1288 return crypto_aead_crt(tfm)->authsize;
761} 1289}
762 1290
1291/**
1292 * crypto_aead_blocksize() - obtain block size of cipher
1293 * @tfm: cipher handle
1294 *
1295 * The block size for the AEAD referenced with the cipher handle is returned.
1296 * The caller may use that information to allocate appropriate memory for the
1297 * data returned by the encryption or decryption operation.
1298 *
1299 * Return: block size of cipher
1300 */
763static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) 1301static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
764{ 1302{
765 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); 1303 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
@@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
785 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); 1323 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
786} 1324}
787 1325
1326/**
1327 * crypto_aead_setkey() - set key for cipher
1328 * @tfm: cipher handle
1329 * @key: buffer holding the key
1330 * @keylen: length of the key in bytes
1331 *
1332 * The caller provided key is set for the AEAD referenced by the cipher
1333 * handle.
1334 *
1335 * Note, the key length determines the cipher variant. Many block ciphers
1336 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256.
1337 * When providing a 16 byte key for an AES cipher handle, AES-128
1338 * is performed.
1339 *
1340 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1341 */
788static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, 1342static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
789 unsigned int keylen) 1343 unsigned int keylen)
790{ 1344{
@@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
793 return crt->setkey(crt->base, key, keylen); 1347 return crt->setkey(crt->base, key, keylen);
794} 1348}
795 1349
1350/**
1351 * crypto_aead_setauthsize() - set authentication data size
1352 * @tfm: cipher handle
1353 * @authsize: size of the authentication data / tag in bytes
1354 *
1355 * Set the authentication data size / tag size. AEAD requires an authentication
1356 * tag (or MAC) in addition to the associated data.
1357 *
1358 * Return: 0 if the authentication data size was set successfully; < 0 if an error occurred
1359 */
796int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); 1360int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
797 1361
798static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) 1362static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
@@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
800 return __crypto_aead_cast(req->base.tfm); 1364 return __crypto_aead_cast(req->base.tfm);
801} 1365}
802 1366
1367/**
1368 * crypto_aead_encrypt() - encrypt plaintext
1369 * @req: reference to the aead_request handle that holds all information
1370 * needed to perform the cipher operation
1371 *
1372 * Encrypt plaintext data using the aead_request handle. That data structure
1373 * and how it is filled with data is discussed with the aead_request_*
1374 * functions.
1375 *
1376 * IMPORTANT NOTE The encryption operation creates the authentication data /
1377 * tag. That data is concatenated with the created ciphertext.
1378 * The ciphertext memory size is therefore the given number of
1379 * block cipher blocks + the size defined by the
1380 * crypto_aead_setauthsize invocation. The caller must ensure
1381 * that sufficient memory is available for the ciphertext and
1382 * the authentication tag.
1383 *
1384 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1385 */
803static inline int crypto_aead_encrypt(struct aead_request *req) 1386static inline int crypto_aead_encrypt(struct aead_request *req)
804{ 1387{
805 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); 1388 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
806} 1389}
807 1390
1391/**
1392 * crypto_aead_decrypt() - decrypt ciphertext
1393 * @req: reference to the aead_request handle that holds all information
1394 * needed to perform the cipher operation
1395 *
1396 * Decrypt ciphertext data using the aead_request handle. That data structure
1397 * and how it is filled with data is discussed with the aead_request_*
1398 * functions.
1399 *
1400 * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
1401 * authentication data / tag. That authentication data / tag
1402 * must have the size defined by the crypto_aead_setauthsize
1403 * invocation.
1404 *
1405 *
1406 * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
1407 * cipher operation performs the authentication of the data during the
1408 * decryption operation. Therefore, the function returns this error if
1409 * the authentication of the ciphertext was unsuccessful (i.e. the
1410 * integrity of the ciphertext or the associated data was violated);
1411 * < 0 if an error occurred.
1412 */
808static inline int crypto_aead_decrypt(struct aead_request *req) 1413static inline int crypto_aead_decrypt(struct aead_request *req)
809{ 1414{
810 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); 1415 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
811} 1416}
812 1417
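To make the -EBADMSG convention concrete, a hypothetical helper around crypto_aead_decrypt() could distinguish an authentication failure from other errors as sketched below; @req is assumed to be fully populated via the aead_request_* calls documented next, and the -EINPROGRESS/-EBUSY return codes of asynchronous implementations are ignored for brevity:

	#include <linux/crypto.h>

	static int example_aead_verify(struct aead_request *req)
	{
		int ret = crypto_aead_decrypt(req);

		switch (ret) {
		case 0:
			/* plaintext written to the destination, tag verified */
			break;
		case -EBADMSG:
			/* integrity of the ciphertext or associated data violated */
			break;
		default:
			/* any other error of the decryption operation */
			break;
		}

		return ret;
	}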
1418/**
1419 * DOC: Asynchronous AEAD Request Handle
1420 *
1421 * The aead_request data structure contains all pointers to data required for
1422 * the AEAD cipher operation. This includes the cipher handle (which can be
1423 * used by multiple aead_request instances), pointer to plaintext and
1424 * ciphertext, asynchronous callback function, etc. It acts as a handle to the
1425 * aead_request_* API calls in a similar way as the AEAD handle relates to the
1426 * crypto_aead_* API calls.
1427 */
1428
1429/**
1430 * crypto_aead_reqsize() - obtain size of the request data structure
1431 * @tfm: cipher handle
1432 *
1433 * Return: number of bytes
1434 */
813static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) 1435static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
814{ 1436{
815 return crypto_aead_crt(tfm)->reqsize; 1437 return crypto_aead_crt(tfm)->reqsize;
816} 1438}
817 1439
1440/**
1441 * aead_request_set_tfm() - update cipher handle reference in request
1442 * @req: request handle to be modified
1443 * @tfm: cipher handle that shall be added to the request handle
1444 *
1445 * Allow the caller to replace the existing aead handle in the request
1446 * data structure with a different one.
1447 */
818static inline void aead_request_set_tfm(struct aead_request *req, 1448static inline void aead_request_set_tfm(struct aead_request *req,
819 struct crypto_aead *tfm) 1449 struct crypto_aead *tfm)
820{ 1450{
821 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); 1451 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
822} 1452}
823 1453
1454/**
1455 * aead_request_alloc() - allocate request data structure
1456 * @tfm: cipher handle to be registered with the request
1457 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1458 *
1459 * Allocate the request data structure that must be used with the AEAD
1460 * encrypt and decrypt API calls. During the allocation, the provided aead
1461 * handle is registered in the request data structure.
1462 *
1463 * Return: allocated request handle in case of success, or NULL in case of
1464 * an out-of-memory condition.
1465 */
824static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, 1466static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
825 gfp_t gfp) 1467 gfp_t gfp)
826{ 1468{
@@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
834 return req; 1476 return req;
835} 1477}
836 1478
1479/**
1480 * aead_request_free() - zeroize and free request data structure
1481 * @req: request data structure to be freed
1482 */
837static inline void aead_request_free(struct aead_request *req) 1483static inline void aead_request_free(struct aead_request *req)
838{ 1484{
839 kzfree(req); 1485 kzfree(req);
840} 1486}
841 1487
1488/**
1489 * aead_request_set_callback() - set asynchronous callback function
1490 * @req: request handle
1491 * @flags: specify zero or an ORing of the flags
1492 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may backlog and
1493 * increase the wait queue beyond the initial maximum size;
1494 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1495 * @compl: callback function pointer to be registered with the request handle
1496 * @data: The data pointer refers to memory that is not used by the kernel
1497 * crypto API, but provided to the callback function for it to use. Here,
1498 * the caller can provide a reference to memory the callback function can
1499 * operate on. As the callback function is invoked asynchronously to the
1500 * related functionality, it may need to access data structures of the
1501 * related functionality which can be referenced using this pointer. The
1502 * callback function can access the memory via the "data" field in the
1503 * crypto_async_request data structure provided to the callback function.
1504 *
1505 * This call registers the callback function that is triggered once the cipher
1506 * operation completes.
1507 *
1508 * The callback function is registered with the aead_request handle and
1509 * must comply with the following template:
1510 *
1511 * void callback_function(struct crypto_async_request *req, int error)
1512 */
842static inline void aead_request_set_callback(struct aead_request *req, 1513static inline void aead_request_set_callback(struct aead_request *req,
843 u32 flags, 1514 u32 flags,
844 crypto_completion_t compl, 1515 crypto_completion_t compl,
@@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req,
849 req->base.flags = flags; 1520 req->base.flags = flags;
850} 1521}
851 1522
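The callback template above is commonly paired with a completion so that a caller can wait for an asynchronous implementation to finish. One possible sketch; the names example_result and example_complete are illustrative, not part of the API:

	#include <linux/completion.h>
	#include <linux/crypto.h>

	struct example_result {
		struct completion completion;
		int err;
	};

	static void example_complete(struct crypto_async_request *req, int err)
	{
		struct example_result *res = req->data;

		/* -EINPROGRESS signals that a backlogged request was started */
		if (err == -EINPROGRESS)
			return;

		res->err = err;
		complete(&res->completion);
	}

The callback would then be registered, after init_completion(&res.completion), with aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, example_complete, &res).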
1523/**
1524 * aead_request_set_crypt() - set data buffers
1525 * @req: request handle
1526 * @src: source scatter / gather list
1527 * @dst: destination scatter / gather list
1528 * @cryptlen: number of bytes to process from @src
1529 * @iv: IV for the cipher operation which must comply with the IV size defined
1530 * by crypto_aead_ivsize()
1531 *
1532 * Setting the source data and destination data scatter / gather lists.
1533 *
1534 * For encryption, the source is treated as the plaintext and the
1535 * destination is the ciphertext. For a decryption operation, the use is
1536 * reversed: the source is the ciphertext and the destination is the plaintext.
1537 *
1538 * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
1539 * the caller must concatenate the ciphertext followed by the
1540 * authentication tag and provide the entire data stream to the
1541 * decryption operation (i.e. the data length used for the
1542 * initialization of the scatterlist and the data length for the
1543 * decryption operation is identical). For encryption, however,
1544 * the authentication tag is created while encrypting the data.
1545 * The destination buffer must hold sufficient space for the
1546 * ciphertext and the authentication tag while the encryption
1547 * invocation must only point to the plaintext data size. The
1548 * following code snippet illustrates the memory usage:
1549 * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
1550 * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
1551 * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
1552 */
852static inline void aead_request_set_crypt(struct aead_request *req, 1553static inline void aead_request_set_crypt(struct aead_request *req,
853 struct scatterlist *src, 1554 struct scatterlist *src,
854 struct scatterlist *dst, 1555 struct scatterlist *dst,
@@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req,
860 req->iv = iv; 1561 req->iv = iv;
861} 1562}
862 1563
1564/**
1565 * aead_request_set_assoc() - set the associated data scatter / gather list
1566 * @req: request handle
1567 * @assoc: associated data scatter / gather list
1568 * @assoclen: number of bytes to process from @assoc
1569 *
1570 * For encryption, the memory is filled with the associated data. For
1571 * decryption, the memory must point to the associated data.
1572 */
863static inline void aead_request_set_assoc(struct aead_request *req, 1573static inline void aead_request_set_assoc(struct aead_request *req,
864 struct scatterlist *assoc, 1574 struct scatterlist *assoc,
865 unsigned int assoclen) 1575 unsigned int assoclen)
@@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req,
868 req->assoclen = assoclen; 1578 req->assoclen = assoclen;
869} 1579}
870 1580
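Putting the aead_request_* calls together, an in-place encryption could look like the sketch below. It reuses the example_result/example_complete sketch from the callback discussion above; the choice of gcm(aes), the 16 byte key and the 16 byte tag are assumptions for illustration only, and production code would add proper key and IV management:

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	static int example_aead_encrypt(const u8 *key, u8 *iv,
					u8 *buf, unsigned int ptlen,
					u8 *assoc, unsigned int assoclen)
	{
		struct crypto_aead *tfm;
		struct aead_request *req;
		struct scatterlist sg, asg;
		struct example_result res;
		const unsigned int authsize = 16;
		int ret;

		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_aead_setkey(tfm, key, 16);
		if (!ret)
			ret = crypto_aead_setauthsize(tfm, authsize);
		if (ret)
			goto out_free_tfm;

		req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		init_completion(&res.completion);
		aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					  example_complete, &res);

		/* buf must provide room for ptlen bytes of plaintext plus the tag */
		sg_init_one(&sg, buf, ptlen + authsize);
		sg_init_one(&asg, assoc, assoclen);
		/* iv must be crypto_aead_ivsize(tfm) bytes */
		aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
		aead_request_set_assoc(req, &asg, assoclen);

		ret = crypto_aead_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&res.completion);
			ret = res.err;
		}

		aead_request_free(req);
	out_free_tfm:
		crypto_free_aead(tfm);
		return ret;
	}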
1581/**
1582 * DOC: Synchronous Block Cipher API
1583 *
1584 * The synchronous block cipher API is used with the ciphers of type
1585 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
1586 *
1587 * Synchronous calls have a context in the tfm. But since a single tfm can be
1588 * used in multiple calls and in parallel, this info should not be changeable
1589 * (unless a lock is used). This applies, for example, to the symmetric key.
1590 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
1591 * structure for the synchronous blkcipher API. So, it is the only state info
1592 * that can be kept for synchronous calls without using a big lock across a tfm.
1593 *
1594 * The block cipher API allows the use of a complete cipher, i.e. a cipher
1595 * consisting of a template (a block chaining mode) and a single block cipher
1596 * primitive (e.g. AES).
1597 *
1598 * The plaintext data buffer and the ciphertext data buffer are pointed to
1599 * by using scatter/gather lists. The cipher operation is performed
1600 * on all segments of the provided scatter/gather lists.
1601 *
1602 * The kernel crypto API supports a cipher operation "in-place" which means that
1603 * the caller may provide the same scatter/gather list for the plaintext and
1604 * cipher text. After the completion of the cipher operation, the plaintext
1605 * data is replaced with the ciphertext data in case of an encryption and vice
1606 * versa for a decryption. The caller must ensure that the scatter/gather lists
1607 * for the output data point to sufficiently large buffers, i.e. multiples of
1608 * the block size of the cipher.
1609 */
1610
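A minimal synchronous usage sketch of this API; the cbc(aes) algorithm, the in-place operation and the helper name example_cbc_encrypt are illustrative assumptions (the CRYPTO_ALG_ASYNC mask merely requests a synchronous implementation):

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>

	static int example_cbc_encrypt(const u8 *key, unsigned int keylen,
				       const u8 *iv, u8 *buf, unsigned int len)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_blkcipher_setkey(tfm, key, keylen);
		if (ret)
			goto out;

		crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

		desc.tfm = tfm;
		desc.flags = 0;

		/* len must be a multiple of crypto_blkcipher_blocksize(tfm) */
		sg_init_one(&sg, buf, len);
		ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
	out:
		crypto_free_blkcipher(tfm);
		return ret;
	}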
871static inline struct crypto_blkcipher *__crypto_blkcipher_cast( 1611static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
872 struct crypto_tfm *tfm) 1612 struct crypto_tfm *tfm)
873{ 1613{
@@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
881 return __crypto_blkcipher_cast(tfm); 1621 return __crypto_blkcipher_cast(tfm);
882} 1622}
883 1623
1624/**
1625 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1626 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1627 * blkcipher cipher
1628 * @type: specifies the type of the cipher
1629 * @mask: specifies the mask for the cipher
1630 *
1631 * Allocate a cipher handle for a block cipher. The returned struct
1632 * crypto_blkcipher is the cipher handle that is required for any subsequent
1633 * API invocation for that block cipher.
1634 *
1635 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1636 * of an error, PTR_ERR() returns the error code.
1637 */
884static inline struct crypto_blkcipher *crypto_alloc_blkcipher( 1638static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
885 const char *alg_name, u32 type, u32 mask) 1639 const char *alg_name, u32 type, u32 mask)
886{ 1640{
@@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm(
897 return &tfm->base; 1651 return &tfm->base;
898} 1652}
899 1653
1654/**
1655 * crypto_free_blkcipher() - zeroize and free the block cipher handle
1656 * @tfm: cipher handle to be freed
1657 */
900static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) 1658static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
901{ 1659{
902 crypto_free_tfm(crypto_blkcipher_tfm(tfm)); 1660 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
903} 1661}
904 1662
1663/**
1664 * crypto_has_blkcipher() - Search for the availability of a block cipher
1665 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1666 * block cipher
1667 * @type: specifies the type of the cipher
1668 * @mask: specifies the mask for the cipher
1669 *
1670 * Return: true when the block cipher is known to the kernel crypto API; false
1671 * otherwise
1672 */
905static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) 1673static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
906{ 1674{
907 type &= ~CRYPTO_ALG_TYPE_MASK; 1675 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
911 return crypto_has_alg(alg_name, type, mask); 1679 return crypto_has_alg(alg_name, type, mask);
912} 1680}
913 1681
1682/**
1683 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1684 * @tfm: cipher handle
1685 *
1686 * Return: The character string holding the name of the cipher
1687 */
914static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) 1688static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
915{ 1689{
916 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); 1690 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
@@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg(
928 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; 1702 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
929} 1703}
930 1704
1705/**
1706 * crypto_blkcipher_ivsize() - obtain IV size
1707 * @tfm: cipher handle
1708 *
1709 * The size of the IV for the block cipher referenced by the cipher handle is
1710 * returned. This IV size may be zero if the cipher does not need an IV.
1711 *
1712 * Return: IV size in bytes
1713 */
931static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) 1714static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
932{ 1715{
933 return crypto_blkcipher_alg(tfm)->ivsize; 1716 return crypto_blkcipher_alg(tfm)->ivsize;
934} 1717}
935 1718
1719/**
1720 * crypto_blkcipher_blocksize() - obtain block size of cipher
1721 * @tfm: cipher handle
1722 *
1723 * The block size for the block cipher referenced with the cipher handle is
1724 * returned. The caller may use that information to allocate appropriate
1725 * memory for the data returned by the encryption or decryption operation.
1726 *
1727 * Return: block size of cipher
1728 */
936static inline unsigned int crypto_blkcipher_blocksize( 1729static inline unsigned int crypto_blkcipher_blocksize(
937 struct crypto_blkcipher *tfm) 1730 struct crypto_blkcipher *tfm)
938{ 1731{
@@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
962 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); 1755 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
963} 1756}
964 1757
1758/**
1759 * crypto_blkcipher_setkey() - set key for cipher
1760 * @tfm: cipher handle
1761 * @key: buffer holding the key
1762 * @keylen: length of the key in bytes
1763 *
1764 * The caller provided key is set for the block cipher referenced by the cipher
1765 * handle.
1766 *
1767 * Note, the key length determines the cipher variant. Many block ciphers
1768 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256.
1769 * When providing a 16 byte key for an AES cipher handle, AES-128
1770 * is performed.
1771 *
1772 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1773 */
965static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, 1774static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
966 const u8 *key, unsigned int keylen) 1775 const u8 *key, unsigned int keylen)
967{ 1776{
@@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
969 key, keylen); 1778 key, keylen);
970} 1779}
971 1780
1781/**
1782 * crypto_blkcipher_encrypt() - encrypt plaintext
1783 * @desc: reference to the block cipher handle with meta data
1784 * @dst: scatter/gather list that is filled by the cipher operation with the
1785 * ciphertext
1786 * @src: scatter/gather list that holds the plaintext
1787 * @nbytes: number of bytes of the plaintext to encrypt.
1788 *
1789 * Encrypt plaintext data using the IV set by the caller with a preceding
1790 * call of crypto_blkcipher_set_iv.
1791 *
1792 * The blkcipher_desc data structure must be filled by the caller and can
1793 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1794 * with the block cipher handle; desc.flags is filled with either
1795 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1796 *
1797 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1798 */
972static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, 1799static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
973 struct scatterlist *dst, 1800 struct scatterlist *dst,
974 struct scatterlist *src, 1801 struct scatterlist *src,
@@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
978 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 1805 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
979} 1806}
980 1807
1808/**
1809 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
1810 * @desc: reference to the block cipher handle with meta data
1811 * @dst: scatter/gather list that is filled by the cipher operation with the
1812 * ciphertext
1813 * @src: scatter/gather list that holds the plaintext
1814 * @nbytes: number of bytes of the plaintext to encrypt.
1815 *
1816 * Encrypt plaintext data with the use of an IV that is solely used for this
1817 * cipher operation. Any previously set IV is not used.
1818 *
1819 * The blkcipher_desc data structure must be filled by the caller and can
1820 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1821 * with the block cipher handle; desc.info is filled with the IV to be used for
1822 * the current operation; desc.flags is filled with either
1823 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1824 *
1825 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1826 */
981static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, 1827static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
982 struct scatterlist *dst, 1828 struct scatterlist *dst,
983 struct scatterlist *src, 1829 struct scatterlist *src,
@@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
986 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 1832 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
987} 1833}
988 1834
1835/**
1836 * crypto_blkcipher_decrypt() - decrypt ciphertext
1837 * @desc: reference to the block cipher handle with meta data
1838 * @dst: scatter/gather list that is filled by the cipher operation with the
1839 * plaintext
1840 * @src: scatter/gather list that holds the ciphertext
1841 * @nbytes: number of bytes of the ciphertext to decrypt.
1842 *
1843 * Decrypt ciphertext data using the IV set by the caller with a preceding
1844 * call of crypto_blkcipher_set_iv.
1845 *
1846 * The blkcipher_desc data structure must be filled by the caller as documented
1847 * for the crypto_blkcipher_encrypt call above.
1848 *
1849 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1850 *
1851 */
989static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, 1852static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
990 struct scatterlist *dst, 1853 struct scatterlist *dst,
991 struct scatterlist *src, 1854 struct scatterlist *src,
@@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
995 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 1858 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
996} 1859}
997 1860
1861/**
1862 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
1863 * @desc: reference to the block cipher handle with meta data
1864 * @dst: scatter/gather list that is filled by the cipher operation with the
1865 * plaintext
1866 * @src: scatter/gather list that holds the ciphertext
1867 * @nbytes: number of bytes of the ciphertext to decrypt.
1868 *
1869 * Decrypt ciphertext data with the use of an IV that is solely used for this
1870 * cipher operation. Any previously set IV is not used.
1871 *
1872 * The blkcipher_desc data structure must be filled by the caller as documented
1873 * for the crypto_blkcipher_encrypt_iv call above.
1874 *
1875 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1876 */
998static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, 1877static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
999 struct scatterlist *dst, 1878 struct scatterlist *dst,
1000 struct scatterlist *src, 1879 struct scatterlist *src,
@@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
1003 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 1882 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1004} 1883}
1005 1884
1885/**
1886 * crypto_blkcipher_set_iv() - set IV for cipher
1887 * @tfm: cipher handle
1888 * @src: buffer holding the IV
1889 * @len: length of the IV in bytes
1890 *
1891 * The caller provided IV is set for the block cipher referenced by the cipher
1892 * handle.
1893 */
1006static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, 1894static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
1007 const u8 *src, unsigned int len) 1895 const u8 *src, unsigned int len)
1008{ 1896{
1009 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); 1897 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
1010} 1898}
1011 1899
1900/**
1901 * crypto_blkcipher_get_iv() - obtain IV from cipher
1902 * @tfm: cipher handle
1903 * @dst: buffer filled with the IV
1904 * @len: length of the buffer dst
1905 *
1906 * The caller can obtain the IV set for the block cipher referenced by the
1907 * cipher handle and store it into the user-provided buffer. If the buffer
1908 * has insufficient space, the IV is truncated to fit the buffer.
1909 */
1012static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, 1910static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1013 u8 *dst, unsigned int len) 1911 u8 *dst, unsigned int len)
1014{ 1912{
1015 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); 1913 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1016} 1914}
1017 1915
1916/**
1917 * DOC: Single Block Cipher API
1918 *
1919 * The single block cipher API is used with the ciphers of type
1920 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1921 *
1922 * Using the single block cipher API calls, operations with the basic cipher
1923 * primitive can be implemented. These cipher primitives exclude any block
1924 * chaining operations including IV handling.
1925 *
1926 * The purpose of this single block cipher API is to support the implementation
1927 * of templates or other concepts that only need to perform the cipher operation
1928 * on one block at a time. Templates invoke the underlying cipher primitive
1929 * block-wise and process either the input or the output data of these cipher
1930 * operations.
1931 */
1932
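A sketch of the single block primitive, assuming AES and a caller-provided pair of one-block buffers; the helper name is hypothetical:

	#include <linux/crypto.h>
	#include <linux/err.h>

	static int example_aes_encrypt_block(const u8 *key, unsigned int keylen,
					     u8 *dst, const u8 *src)
	{
		struct crypto_cipher *tfm;
		int ret;

		tfm = crypto_alloc_cipher("aes", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_cipher_setkey(tfm, key, keylen);
		if (!ret)
			/* dst and src must each be crypto_cipher_blocksize(tfm) bytes */
			crypto_cipher_encrypt_one(tfm, dst, src);

		crypto_free_cipher(tfm);
		return ret;
	}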
1018static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) 1933static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1019{ 1934{
1020 return (struct crypto_cipher *)tfm; 1935 return (struct crypto_cipher *)tfm;
@@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1026 return __crypto_cipher_cast(tfm); 1941 return __crypto_cipher_cast(tfm);
1027} 1942}
1028 1943
1944/**
1945 * crypto_alloc_cipher() - allocate single block cipher handle
1946 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1947 * single block cipher
1948 * @type: specifies the type of the cipher
1949 * @mask: specifies the mask for the cipher
1950 *
1951 * Allocate a cipher handle for a single block cipher. The returned struct
1952 * crypto_cipher is the cipher handle that is required for any subsequent API
1953 * invocation for that single block cipher.
1954 *
1955 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1956 * of an error, PTR_ERR() returns the error code.
1957 */
1029static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, 1958static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1030 u32 type, u32 mask) 1959 u32 type, u32 mask)
1031{ 1960{
@@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1041 return &tfm->base; 1970 return &tfm->base;
1042} 1971}
1043 1972
1973/**
1974 * crypto_free_cipher() - zeroize and free the single block cipher handle
1975 * @tfm: cipher handle to be freed
1976 */
1044static inline void crypto_free_cipher(struct crypto_cipher *tfm) 1977static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1045{ 1978{
1046 crypto_free_tfm(crypto_cipher_tfm(tfm)); 1979 crypto_free_tfm(crypto_cipher_tfm(tfm));
1047} 1980}
1048 1981
1982/**
1983 * crypto_has_cipher() - Search for the availability of a single block cipher
1984 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1985 * single block cipher
1986 * @type: specifies the type of the cipher
1987 * @mask: specifies the mask for the cipher
1988 *
1989 * Return: true when the single block cipher is known to the kernel crypto API;
1990 * false otherwise
1991 */
1049static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) 1992static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
1050{ 1993{
1051 type &= ~CRYPTO_ALG_TYPE_MASK; 1994 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
1060 return &crypto_cipher_tfm(tfm)->crt_cipher; 2003 return &crypto_cipher_tfm(tfm)->crt_cipher;
1061} 2004}
1062 2005
2006/**
2007 * crypto_cipher_blocksize() - obtain block size for cipher
2008 * @tfm: cipher handle
2009 *
2010 * The block size for the single block cipher referenced with the cipher handle
2011 * tfm is returned. The caller may use that information to allocate appropriate
2012 * memory for the data returned by the encryption or decryption operation.
2013 *
2014 * Return: block size of cipher
2015 */
1063static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) 2016static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
1064{ 2017{
1065 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); 2018 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
@@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
1087 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); 2040 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
1088} 2041}
1089 2042
2043/**
2044 * crypto_cipher_setkey() - set key for cipher
2045 * @tfm: cipher handle
2046 * @key: buffer holding the key
2047 * @keylen: length of the key in bytes
2048 *
2049 * The caller provided key is set for the single block cipher referenced by the
2050 * cipher handle.
2051 *
2052 * Note, the key length determines the cipher variant. Many block ciphers
2053 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256.
2054 * When providing a 16 byte key for an AES cipher handle, AES-128
2055 * is performed.
2056 *
2057 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2058 */
1090static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, 2059static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1091 const u8 *key, unsigned int keylen) 2060 const u8 *key, unsigned int keylen)
1092{ 2061{
@@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1094 key, keylen); 2063 key, keylen);
1095} 2064}
1096 2065
2066/**
2067 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
2068 * @tfm: cipher handle
2069 * @dst: points to the buffer that will be filled with the ciphertext
2070 * @src: buffer holding the plaintext to be encrypted
2071 *
2072 * Invoke the encryption operation of one block. The caller must ensure that
2073 * the plaintext and ciphertext buffers are at least one block in size.
2074 */
1097static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, 2075static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1098 u8 *dst, const u8 *src) 2076 u8 *dst, const u8 *src)
1099{ 2077{
@@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1101 dst, src); 2079 dst, src);
1102} 2080}
1103 2081
2082/**
2083 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
2084 * @tfm: cipher handle
2085 * @dst: points to the buffer that will be filled with the plaintext
2086 * @src: buffer holding the ciphertext to be decrypted
2087 *
2088 * Invoke the decryption operation of one block. The caller must ensure that
2089 * the plaintext and ciphertext buffers are at least one block in size.
2090 */
1104static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, 2091static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1105 u8 *dst, const u8 *src) 2092 u8 *dst, const u8 *src)
1106{ 2093{
@@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1108 dst, src); 2095 dst, src);
1109} 2096}
1110 2097
2098/**
2099 * DOC: Synchronous Message Digest API
2100 *
2101 * The synchronous message digest API is used with the ciphers of type
2102 * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
2103 */
2104
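A possible init/update/final sequence for this API, assuming sha256 and a single contiguous buffer; the one-shot crypto_hash_digest() helper documented below wraps the same three steps:

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>

	static int example_sha256(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_hash *tfm;
		struct hash_desc desc;
		struct scatterlist sg;
		int ret;

		/* the mask requests a synchronous implementation */
		tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;
		sg_init_one(&sg, data, len);

		/* out must hold crypto_hash_digestsize(tfm) bytes */
		ret = crypto_hash_init(&desc);
		if (!ret)
			ret = crypto_hash_update(&desc, &sg, len);
		if (!ret)
			ret = crypto_hash_final(&desc, out);

		crypto_free_hash(tfm);
		return ret;
	}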
1111static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) 2105static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
1112{ 2106{
1113 return (struct crypto_hash *)tfm; 2107 return (struct crypto_hash *)tfm;
@@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
1120 return __crypto_hash_cast(tfm); 2114 return __crypto_hash_cast(tfm);
1121} 2115}
1122 2116
2117/**
2118 * crypto_alloc_hash() - allocate synchronous message digest handle
2119 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2120 * message digest cipher
2121 * @type: specifies the type of the cipher
2122 * @mask: specifies the mask for the cipher
2123 *
2124 * Allocate a cipher handle for a message digest. The returned struct
2125 * crypto_hash is the cipher handle that is required for any subsequent
2126 * API invocation for that message digest.
2127 *
2128 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
2129 * of an error, PTR_ERR() returns the error code.
2130 */
1123static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, 2131static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
1124 u32 type, u32 mask) 2132 u32 type, u32 mask)
1125{ 2133{
@@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
1136 return &tfm->base; 2144 return &tfm->base;
1137} 2145}
1138 2146
2147/**
2148 * crypto_free_hash() - zeroize and free message digest handle
2149 * @tfm: cipher handle to be freed
2150 */
1139static inline void crypto_free_hash(struct crypto_hash *tfm) 2151static inline void crypto_free_hash(struct crypto_hash *tfm)
1140{ 2152{
1141 crypto_free_tfm(crypto_hash_tfm(tfm)); 2153 crypto_free_tfm(crypto_hash_tfm(tfm));
1142} 2154}
1143 2155
2156/**
2157 * crypto_has_hash() - Search for the availability of a message digest
2158 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2159 * message digest cipher
2160 * @type: specifies the type of the cipher
2161 * @mask: specifies the mask for the cipher
2162 *
2163 * Return: true when the message digest cipher is known to the kernel crypto
2164 * API; false otherwise
2165 */
1144static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) 2166static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
1145{ 2167{
1146 type &= ~CRYPTO_ALG_TYPE_MASK; 2168 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
1156 return &crypto_hash_tfm(tfm)->crt_hash; 2178 return &crypto_hash_tfm(tfm)->crt_hash;
1157} 2179}
1158 2180
2181/**
2182 * crypto_hash_blocksize() - obtain block size for message digest
2183 * @tfm: cipher handle
2184 *
2185 * The block size for the message digest cipher referenced with the cipher
2186 * handle is returned.
2187 *
2188 * Return: block size of cipher
2189 */
1159static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) 2190static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
1160{ 2191{
1161 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); 2192 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
@@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
1166 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); 2197 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
1167} 2198}
1168 2199
2200/**
2201 * crypto_hash_digestsize() - obtain message digest size
2202 * @tfm: cipher handle
2203 *
2204 * The size for the message digest created by the message digest cipher
2205 * referenced with the cipher handle is returned.
2206 *
2207 * Return: message digest size
2208 */
1169static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) 2209static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
1170{ 2210{
1171 return crypto_hash_crt(tfm)->digestsize; 2211 return crypto_hash_crt(tfm)->digestsize;
@@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
1186 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); 2226 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
1187} 2227}
1188 2228
2229/**
2230 * crypto_hash_init() - (re)initialize message digest handle
2231 * @desc: cipher request handle that is to be filled by the caller --
2232 * desc.tfm is filled with the hash cipher handle;
2233 * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
2234 *
2235 * The call (re-)initializes the message digest referenced by the hash cipher
2236 * request handle. Any potentially existing state created by previous
2237 * operations is discarded.
2238 *
2239 * Return: 0 if the message digest initialization was successful; < 0 if an
2240 * error occurred
2241 */
1189static inline int crypto_hash_init(struct hash_desc *desc) 2242static inline int crypto_hash_init(struct hash_desc *desc)
1190{ 2243{
1191 return crypto_hash_crt(desc->tfm)->init(desc); 2244 return crypto_hash_crt(desc->tfm)->init(desc);
1192} 2245}
1193 2246
2247/**
2248 * crypto_hash_update() - add data to message digest for processing
2249 * @desc: cipher request handle
2250 * @sg: scatter / gather list pointing to the data to be added to the message
2251 * digest
2252 * @nbytes: number of bytes to be processed from @sg
2253 *
2254 * Updates the message digest state of the cipher handle pointed to by the
2255 * hash cipher request handle with the input data pointed to by the
2256 * scatter/gather list.
2257 *
2258 * Return: 0 if the message digest update was successful; < 0 if an error
2259 * occurred
2260 */
1194static inline int crypto_hash_update(struct hash_desc *desc, 2261static inline int crypto_hash_update(struct hash_desc *desc,
1195 struct scatterlist *sg, 2262 struct scatterlist *sg,
1196 unsigned int nbytes) 2263 unsigned int nbytes)
@@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc,
1198 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); 2265 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
1199} 2266}
1200 2267
2268/**
2269 * crypto_hash_final() - calculate message digest
2270 * @desc: cipher request handle
2271 * @out: message digest output buffer -- The caller must ensure that the out
2272 * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
2273 * function).
2274 *
2275 * Finalize the message digest operation and create the message digest
2276 * based on all data added to the cipher handle. The message digest is placed
2277 * into the output buffer.
2278 *
2279 * Return: 0 if the message digest creation was successful; < 0 if an error
2280 * occurred
2281 */
1201static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) 2282static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
1202{ 2283{
1203 return crypto_hash_crt(desc->tfm)->final(desc, out); 2284 return crypto_hash_crt(desc->tfm)->final(desc, out);
1204} 2285}
1205 2286
2287/**
2288 * crypto_hash_digest() - calculate message digest for a buffer
2289 * @desc: see crypto_hash_final()
2290 * @sg: see crypto_hash_update()
2291 * @nbytes: see crypto_hash_update()
2292 * @out: see crypto_hash_final()
2293 *
2294 * This function is a "short-hand" for the function calls of crypto_hash_init,
2295 * crypto_hash_update and crypto_hash_final. The parameters have the same
2296 * meaning as discussed for those separate three functions.
2297 *
2298 * Return: 0 if the message digest creation was successful; < 0 if an error
2299 * occurred
2300 */
1206static inline int crypto_hash_digest(struct hash_desc *desc, 2301static inline int crypto_hash_digest(struct hash_desc *desc,
1207 struct scatterlist *sg, 2302 struct scatterlist *sg,
1208 unsigned int nbytes, u8 *out) 2303 unsigned int nbytes, u8 *out)
@@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc,
1210 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); 2305 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
1211} 2306}
1212 2307
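For keyed hashes such as HMAC, the one-shot helper combines naturally with crypto_hash_setkey() documented next. A hypothetical sketch; the hmac(sha256) name and the helper name are assumptions:

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>

	static int example_hmac_sha256(const u8 *key, unsigned int keylen,
				       const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_hash *tfm;
		struct hash_desc desc;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_hash_setkey(tfm, key, keylen);
		if (ret)
			goto out;

		desc.tfm = tfm;
		desc.flags = 0;
		sg_init_one(&sg, data, len);

		/* out must hold crypto_hash_digestsize(tfm) bytes (32 for SHA-256) */
		ret = crypto_hash_digest(&desc, &sg, len, out);
	out:
		crypto_free_hash(tfm);
		return ret;
	}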
2308/**
2309 * crypto_hash_setkey() - set key for message digest
2310 * @hash: cipher handle
2311 * @key: buffer holding the key
2312 * @keylen: length of the key in bytes
2313 *
2314 * The caller provided key is set for the message digest cipher. The cipher
2315 * handle must point to a keyed hash in order for this function to succeed.
2316 *
2317 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2318 */
1213static inline int crypto_hash_setkey(struct crypto_hash *hash, 2319static inline int crypto_hash_setkey(struct crypto_hash *hash,
1214 const u8 *key, unsigned int keylen) 2320 const u8 *key, unsigned int keylen)
1215{ 2321{
diff --git a/include/net/sock.h b/include/net/sock.h
index c3e83c9a8ab8..2210fec65669 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1593,6 +1593,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1593 int *errcode, int max_page_order); 1593 int *errcode, int max_page_order);
1594void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); 1594void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1595void sock_kfree_s(struct sock *sk, void *mem, int size); 1595void sock_kfree_s(struct sock *sk, void *mem, int size);
1596void sock_kzfree_s(struct sock *sk, void *mem, int size);
1596void sk_send_sigurg(struct sock *sk); 1597void sk_send_sigurg(struct sock *sk);
1597 1598
1598/* 1599/*
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index 0f9acce5b1ff..f2acd2fde1f3 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -32,6 +32,8 @@ struct af_alg_iv {
32#define ALG_SET_KEY 1 32#define ALG_SET_KEY 1
33#define ALG_SET_IV 2 33#define ALG_SET_IV 2
34#define ALG_SET_OP 3 34#define ALG_SET_OP 3
35#define ALG_SET_AEAD_ASSOCLEN 4
36#define ALG_SET_AEAD_AUTHSIZE 5
35 37
36/* Operations */ 38/* Operations */
37#define ALG_OP_DECRYPT 0 39#define ALG_OP_DECRYPT 0
diff --git a/net/core/sock.c b/net/core/sock.c
index 9a56b2000c3f..1c7a33db1314 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1731,18 +1731,34 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1731} 1731}
1732EXPORT_SYMBOL(sock_kmalloc); 1732EXPORT_SYMBOL(sock_kmalloc);
1733 1733
1734/* 1734/* Free an option memory block. Note, we actually want the inline
1735 * Free an option memory block. 1735 * here as this allows gcc to detect the nullify and fold away the
1736 * condition entirely.
1736 */ 1737 */
1737void sock_kfree_s(struct sock *sk, void *mem, int size) 1738static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1739 const bool nullify)
1738{ 1740{
1739 if (WARN_ON_ONCE(!mem)) 1741 if (WARN_ON_ONCE(!mem))
1740 return; 1742 return;
1741 kfree(mem); 1743 if (nullify)
1744 kzfree(mem);
1745 else
1746 kfree(mem);
1742 atomic_sub(size, &sk->sk_omem_alloc); 1747 atomic_sub(size, &sk->sk_omem_alloc);
1743} 1748}
1749
1750void sock_kfree_s(struct sock *sk, void *mem, int size)
1751{
1752 __sock_kfree_s(sk, mem, size, false);
1753}
1744EXPORT_SYMBOL(sock_kfree_s); 1754EXPORT_SYMBOL(sock_kfree_s);
1745 1755
1756void sock_kzfree_s(struct sock *sk, void *mem, int size)
1757{
1758 __sock_kfree_s(sk, mem, size, true);
1759}
1760EXPORT_SYMBOL(sock_kzfree_s);
1761
1746/* It is almost wait_for_tcp_memory minus release_sock/lock_sock. 1762/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1747 I think, these locks should be removed for datagram sockets. 1763 I think, these locks should be removed for datagram sockets.
1748 */ 1764 */