author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 16:33:26 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 16:33:26 -0500
commit     e3aa91a7cb21a595169b20c64f63ca39a91a0c43 (patch)
tree       6a92a2e595629949a45336c770c2408abba8444d /include
parent     78a45c6f067824cf5d0a9fedea7339ac2e28603c (diff)
parent     8606813a6c8997fd3bb805186056d78670eb86ca (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- The crypto API is now documented :)
- Disallow arbitrary module loading through crypto API.
- Allow get request with empty driver name through crypto_user.
- Allow speed testing of arbitrary hash functions.
- Add caam support for ctr(aes), gcm(aes) and their derivatives.
- nx now supports concurrent hashing properly.
- Add sahara support for SHA1/256.
- Add ARM64 version of CRC32.
- Misc fixes.
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (77 commits)
crypto: tcrypt - Allow speed testing of arbitrary hash functions
crypto: af_alg - add user space interface for AEAD
crypto: qat - fix problem with coalescing enable logic
crypto: sahara - add support for SHA1/256
crypto: sahara - replace tasklets with kthread
crypto: sahara - add support for i.MX53
crypto: sahara - fix spinlock initialization
crypto: arm - replace memset by memzero_explicit
crypto: powerpc - replace memset by memzero_explicit
crypto: sha - replace memset by memzero_explicit
crypto: sparc - replace memset by memzero_explicit
crypto: algif_skcipher - initialize upon init request
crypto: algif_skcipher - removed unneeded code
crypto: algif_skcipher - Fixed blocking recvmsg
crypto: drbg - use memzero_explicit() for clearing sensitive data
crypto: drbg - use MODULE_ALIAS_CRYPTO
crypto: include crypto- module prefix in template
crypto: user - add MODULE_ALIAS
crypto: sha-mb - remove a bogus NULL check
crytpo: qat - Fix 64 bytes requests
...
Diffstat (limited to 'include')

 -rw-r--r--  include/crypto/hash.h        |  492
 -rw-r--r--  include/crypto/if_alg.h      |    1
 -rw-r--r--  include/crypto/rng.h         |   80
 -rw-r--r--  include/linux/crypto.h       | 1112
 -rw-r--r--  include/net/sock.h           |    1
 -rw-r--r--  include/uapi/linux/if_alg.h  |    2

 6 files changed, 1685 insertions, 3 deletions
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 74b13ec1ebd4..98abda9ed3aa 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -17,6 +17,32 @@
 
 struct crypto_ahash;
 
+/**
+ * DOC: Message Digest Algorithm Definitions
+ *
+ * These data structures define modular message digest algorithm
+ * implementations, managed via crypto_register_ahash(),
+ * crypto_register_shash(), crypto_unregister_ahash() and
+ * crypto_unregister_shash().
+ */
+
+/**
+ * struct hash_alg_common - define properties of message digest
+ * @digestsize: Size of the result of the transformation. A buffer of this size
+ *              must be available to the @final and @finup calls, so they can
+ *              store the resulting hash into it. For various predefined sizes,
+ *              search include/crypto/ using
+ *              git grep _DIGEST_SIZE include/crypto.
+ * @statesize: Size of the block for partial state of the transformation. A
+ *             buffer of this size must be passed to the @export function as it
+ *             will save the partial state of the transformation into it. On the
+ *             other side, the @import function will load the state from a
+ *             buffer of this size as well.
+ * @base: Start of data structure of cipher algorithm. The common data
+ *        structure of crypto_alg contains information common to all ciphers.
+ *        The hash_alg_common data structure now adds the hash-specific
+ *        information.
+ */
 struct hash_alg_common {
        unsigned int digestsize;
        unsigned int statesize;
@@ -37,6 +63,63 @@ struct ahash_request {
        void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
+/**
+ * struct ahash_alg - asynchronous message digest definition
+ * @init: Initialize the transformation context. Intended only to initialize the
+ *        state of the HASH transformation at the beginning. This shall fill in
+ *        the internal structures used during the entire duration of the whole
+ *        transformation. No data processing happens at this point.
+ * @update: Push a chunk of data into the driver for transformation. This
+ *          function actually pushes blocks of data from upper layers into the
+ *          driver, which then passes those to the hardware as seen fit. This
+ *          function must not finalize the HASH transformation by calculating the
+ *          final message digest as this only adds more data into the
+ *          transformation. This function shall not modify the transformation
+ *          context, as this function may be called in parallel with the same
+ *          transformation object. Data processing can happen synchronously
+ *          [SHASH] or asynchronously [AHASH] at this point.
+ * @final: Retrieve result from the driver. This function finalizes the
+ *         transformation and retrieves the resulting hash from the driver and
+ *         pushes it back to upper layers. No data processing happens at this
+ *         point.
+ * @finup: Combination of @update and @final. This function is effectively a
+ *         combination of @update and @final calls issued in sequence. As some
+ *         hardware cannot do @update and @final separately, this callback was
+ *         added to allow such hardware to be used at least by IPsec. Data
+ *         processing can happen synchronously [SHASH] or asynchronously [AHASH]
+ *         at this point.
+ * @digest: Combination of @init and @update and @final. This function
+ *          effectively behaves as the entire chain of operations, @init,
+ *          @update and @final issued in sequence. Just like @finup, this was
+ *          added for hardware which cannot do even the @finup, but can only do
+ *          the whole transformation in one run. Data processing can happen
+ *          synchronously [SHASH] or asynchronously [AHASH] at this point.
+ * @setkey: Set an optional key used by the hashing algorithm. Intended to push
+ *          the optional key used by the hashing algorithm from upper layers into
+ *          the driver. This function can store the key in the transformation
+ *          context or can outright program it into the hardware. In the former
+ *          case, one must be careful to program the key into the hardware at the
+ *          appropriate time and one must be careful that .setkey() can be
+ *          called multiple times during the existence of the transformation
+ *          object. Not all hashing algorithms implement this function, as it
+ *          is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
+ *          implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
+ *          this function. This function must be called before any of @init,
+ *          @update, @final, @finup or @digest is called. No data
+ *          processing happens at this point.
+ * @export: Export partial state of the transformation. This function dumps the
+ *          entire state of the ongoing transformation into a provided block of
+ *          data so it can be @import 'ed back later on. This is useful in case
+ *          you want to save a partial result of the transformation after
+ *          processing a certain amount of data and reload that partial result
+ *          later on for re-use. No data processing happens at this point.
+ * @import: Import partial state of the transformation. This function loads the
+ *          entire state of the ongoing transformation from a provided block of
+ *          data so the transformation can continue from this point onward. No
+ *          data processing happens at this point.
+ * @halg: see struct hash_alg_common
+ */
 struct ahash_alg {
        int (*init)(struct ahash_request *req);
        int (*update)(struct ahash_request *req);
@@ -63,6 +146,23 @@ struct shash_desc {
                       crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
        struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
 
+/**
+ * struct shash_alg - synchronous message digest definition
+ * @init: see struct ahash_alg
+ * @update: see struct ahash_alg
+ * @final: see struct ahash_alg
+ * @finup: see struct ahash_alg
+ * @digest: see struct ahash_alg
+ * @export: see struct ahash_alg
+ * @import: see struct ahash_alg
+ * @setkey: see struct ahash_alg
+ * @digestsize: see struct ahash_alg
+ * @statesize: see struct ahash_alg
+ * @descsize: Size of the operational state for the message digest. This state
+ *            size is the memory size that needs to be allocated for
+ *            shash_desc.__ctx
+ * @base: internally used
+ */
 struct shash_alg {
        int (*init)(struct shash_desc *desc);
        int (*update)(struct shash_desc *desc, const u8 *data,
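The callback semantics documented above are easiest to read next to a concrete implementer. The following is a minimal, hypothetical skeleton (not part of this patch): the algorithm name, `EXAMPLE_DIGEST_SIZE`, `struct example_state` and the callback bodies are placeholders, only the wiring into `struct shash_alg` and `crypto_register_shash()` reflects the documented API.

```c
/* Hypothetical sketch: skeleton of a synchronous digest implementation. */
#define EXAMPLE_DIGEST_SIZE	20

struct example_state {
	u32 partial;			/* whatever per-request state is needed */
};

static int example_init(struct shash_desc *desc)
{
	/* reset the per-request state kept in shash_desc_ctx(desc) */
	return 0;
}

static int example_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	/* absorb 'len' bytes of 'data' into the state; must not finalize */
	return 0;
}

static int example_final(struct shash_desc *desc, u8 *out)
{
	/* write EXAMPLE_DIGEST_SIZE bytes of result into 'out' */
	return 0;
}

static struct shash_alg example_shash = {
	.digestsize	= EXAMPLE_DIGEST_SIZE,
	.descsize	= sizeof(struct example_state),	/* sizes __ctx, see @descsize */
	.init		= example_init,
	.update		= example_update,
	.final		= example_final,
	.base		= {
		.cra_name	 = "example-digest",
		.cra_driver_name = "example-digest-generic",
		.cra_priority	 = 100,
		.cra_blocksize	 = 64,		/* placeholder block size */
		.cra_module	 = THIS_MODULE,
	},
};

/* Registered with crypto_register_shash(&example_shash) from module init. */
```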
@@ -107,11 +207,35 @@ struct crypto_shash {
        struct crypto_tfm base;
 };
 
+/**
+ * DOC: Asynchronous Message Digest API
+ *
+ * The asynchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
+ *
+ * The asynchronous cipher operation discussion provided for the
+ * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
+ */
+
 static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
 {
        return container_of(tfm, struct crypto_ahash, base);
 }
 
+/**
+ * crypto_alloc_ahash() - allocate ahash cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *            ahash cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ahash. The returned struct
+ * crypto_ahash is the cipher handle that is required for any subsequent
+ * API invocation for that ahash.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *         of an error, PTR_ERR() returns the error code.
+ */
 struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask);
 
@@ -120,6 +244,10 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
        return &tfm->base;
 }
 
+/**
+ * crypto_free_ahash() - zeroize and free the ahash handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_ahash(struct crypto_ahash *tfm)
 {
        crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
@@ -143,6 +271,16 @@ static inline struct hash_alg_common *crypto_hash_alg_common(
        return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
 }
 
+/**
+ * crypto_ahash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ *
+ * Return: message digest size of cipher
+ */
 static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
 {
        return crypto_hash_alg_common(tfm)->digestsize;
@@ -168,12 +306,32 @@ static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
        crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
 }
 
+/**
+ * crypto_ahash_reqtfm() - obtain cipher handle from request
+ * @req: asynchronous request handle that contains the reference to the ahash
+ *       cipher handle
+ *
+ * Return the ahash cipher handle that is registered with the asynchronous
+ * request handle ahash_request.
+ *
+ * Return: ahash cipher handle
+ */
 static inline struct crypto_ahash *crypto_ahash_reqtfm(
        struct ahash_request *req)
 {
        return __crypto_ahash_cast(req->base.tfm);
 }
 
+/**
+ * crypto_ahash_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return the size of the ahash state. With the crypto_ahash_export
+ * function, the caller can export the state into a buffer whose size is
+ * defined with this function.
+ *
+ * Return: size of the ahash state
+ */
 static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
 {
        return tfm->reqsize;
@@ -184,38 +342,166 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
        return req->__ctx;
 }
 
+/**
+ * crypto_ahash_setkey - set key for cipher handle
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ahash cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen);
+
+/**
+ * crypto_ahash_finup() - update and finalize message digest
+ * @req: reference to the ahash_request handle that holds all information
+ *       needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *         occurred
+ */
 int crypto_ahash_finup(struct ahash_request *req);
+
+/**
+ * crypto_ahash_final() - calculate message digest
+ * @req: reference to the ahash_request handle that holds all information
+ *       needed to perform the cipher operation
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer registered with the ahash_request handle.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *         occurred
+ */
 int crypto_ahash_final(struct ahash_request *req);
+
+/**
+ * crypto_ahash_digest() - calculate message digest for a buffer
+ * @req: reference to the ahash_request handle that holds all information
+ *       needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of crypto_ahash_init,
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *         occurred
+ */
 int crypto_ahash_digest(struct ahash_request *req);
 
+/**
+ * crypto_ahash_export() - extract current message digest state
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the ahash_request handle into the
+ * caller-allocated output buffer out which must have sufficient size (e.g. by
+ * calling crypto_ahash_reqsize).
+ *
+ * Return: 0 if the export was successful; < 0 if an error occurred
+ */
 static inline int crypto_ahash_export(struct ahash_request *req, void *out)
 {
        return crypto_ahash_reqtfm(req)->export(req, out);
 }
 
+/**
+ * crypto_ahash_import() - import message digest state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the ahash_request handle from the
+ * input buffer. That buffer should have been generated with the
+ * crypto_ahash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
 static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
 {
        return crypto_ahash_reqtfm(req)->import(req, in);
 }
 
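The @export/@import pair lets a partially processed hash be checkpointed and resumed. A minimal consumer-side sketch (not part of this patch; sizing the buffer from the statesize field shown earlier is an assumption about typical usage, and error handling is abbreviated):

```c
/* Hypothetical sketch: checkpoint and resume a partially hashed stream. */
static int example_save_and_resume(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int err;

	/* statesize comes from struct hash_alg_common shown above */
	state = kmalloc(crypto_hash_alg_common(tfm)->statesize, GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_ahash_export(req, state);		/* dump partial state */
	if (!err)
		err = crypto_ahash_import(req, state);	/* reload it later */

	kfree(state);
	return err;
}
```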
+/**
+ * crypto_ahash_init() - (re)initialize message digest handle
+ * @req: ahash_request handle that already is initialized with all necessary
+ *       data using the ahash_request_* API functions
+ *
+ * The call (re-)initializes the message digest referenced by the ahash_request
+ * handle. Any potentially existing state created by previous operations is
+ * discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ *         error occurred
+ */
 static inline int crypto_ahash_init(struct ahash_request *req)
 {
        return crypto_ahash_reqtfm(req)->init(req);
 }
 
+/**
+ * crypto_ahash_update() - add data to message digest for processing
+ * @req: ahash_request handle that was previously initialized with the
+ *       crypto_ahash_init call.
+ *
+ * Updates the message digest state of the &ahash_request handle. The input data
+ * is pointed to by the scatter/gather list registered in the &ahash_request
+ * handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ *         occurred
+ */
 static inline int crypto_ahash_update(struct ahash_request *req)
 {
        return crypto_ahash_reqtfm(req)->update(req);
 }
 
+/**
+ * DOC: Asynchronous Hash Request Handle
+ *
+ * The &ahash_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple &ahash_request instances), pointer
+ * to plaintext and the message digest output buffer, asynchronous callback
+ * function, etc. It acts as a handle to the ahash_request_* API calls in a
+ * similar way as the ahash handle to the crypto_ahash_* API calls.
+ */
+
+/**
+ * ahash_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ahash handle in the request
+ * data structure with a different one.
+ */
 static inline void ahash_request_set_tfm(struct ahash_request *req,
                                         struct crypto_ahash *tfm)
 {
        req->base.tfm = crypto_ahash_tfm(tfm);
 }
 
+/**
+ * ahash_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ahash
+ * message digest API calls. During the allocation, the provided ahash handle
+ * is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ *         of an error, PTR_ERR() returns the error code.
+ */
 static inline struct ahash_request *ahash_request_alloc(
        struct crypto_ahash *tfm, gfp_t gfp)
 {
@@ -230,6 +516,10 @@ static inline struct ahash_request *ahash_request_alloc(
        return req;
 }
 
+/**
+ * ahash_request_free() - zeroize and free the request data structure
+ * @req: request data structure cipher handle to be freed
+ */
 static inline void ahash_request_free(struct ahash_request *req)
 {
        kzfree(req);
@@ -241,6 +531,31 @@ static inline struct ahash_request *ahash_request_cast(
        return container_of(req, struct ahash_request, base);
 }
 
+/**
+ * ahash_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *         increase the wait queue beyond the initial maximum size;
+ *         CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *        crypto API, but provided to the callback function for it to use. Here,
+ *        the caller can provide a reference to memory the callback function can
+ *        operate on. As the callback function is invoked asynchronously to the
+ *        related functionality, it may need to access data structures of the
+ *        related functionality which can be referenced using this pointer. The
+ *        callback function can access the memory via the "data" field in the
+ *        &crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once
+ * the cipher operation completes.
+ *
+ * The callback function is registered with the &ahash_request handle and
+ * must comply with the following template
+ *
+ *      void callback_function(struct crypto_async_request *req, int error)
+ */
 static inline void ahash_request_set_callback(struct ahash_request *req,
                                              u32 flags,
                                              crypto_completion_t compl,
@@ -251,6 +566,19 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
        req->base.flags = flags;
 }
 
+/**
+ * ahash_request_set_crypt() - set data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source scatter/gather list
+ * @result: buffer that is filled with the message digest -- the caller must
+ *          ensure that the buffer has sufficient space by, for example, calling
+ *          crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source scatter/gather list
+ *
+ * By using this call, the caller references the source scatter/gather list.
+ * The source scatter/gather list points to the data the message digest is to
+ * be calculated for.
+ */
 static inline void ahash_request_set_crypt(struct ahash_request *req,
                                           struct scatterlist *src, u8 *result,
                                           unsigned int nbytes)
@@ -260,6 +588,33 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
        req->result = result;
 }
 
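Putting the asynchronous pieces together, a consumer-side sketch (not part of this patch) might hash a buffer with "sha256" and wait on a completion, following the usual wait pattern of this era; the function and type names starting with example_ are illustrative, and the usual <crypto/hash.h>, <linux/scatterlist.h> and <linux/completion.h> includes are assumed:

```c
/* Hypothetical sketch: one-shot SHA-256 of a buffer via the ahash API. */
struct example_wait {
	struct completion done;
	int err;
};

static void example_hash_done(struct crypto_async_request *req, int error)
{
	struct example_wait *w = req->data;

	if (error == -EINPROGRESS)
		return;			/* backlogged request got queued; keep waiting */
	w->err = error;
	complete(&w->done);
}

static int example_sha256(const u8 *buf, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct example_wait w;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&w.done);
	/* buf must be addressable through a scatterlist (e.g. kmalloc'ed) */
	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_hash_done, &w);
	ahash_request_set_crypt(req, &sg, digest, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&w.done);
		err = w.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
```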
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
+ *
+ * The message digest API is able to maintain state information for the
+ * caller.
+ *
+ * The synchronous message digest API can store user-related context in its
+ * shash_desc request data structure.
+ */
+
+/**
+ * crypto_alloc_shash() - allocate message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *            message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned &struct
+ * crypto_shash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *         of an error, PTR_ERR() returns the error code.
+ */
 struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
                                        u32 mask);
 
@@ -268,6 +623,10 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
        return &tfm->base;
 }
 
+/**
+ * crypto_free_shash() - zeroize and free the message digest handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_shash(struct crypto_shash *tfm)
 {
        crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
@@ -279,6 +638,15 @@ static inline unsigned int crypto_shash_alignmask(
        return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
 }
 
+/**
+ * crypto_shash_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
 static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
 {
        return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
@@ -294,6 +662,15 @@ static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
        return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
 }
 
+/**
+ * crypto_shash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: digest size of cipher
+ */
 static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
 {
        return crypto_shash_alg(tfm)->digestsize;
@@ -319,6 +696,21 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
        crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
 }
 
+/**
+ * crypto_shash_descsize() - obtain the operational state size
+ * @tfm: cipher handle
+ *
+ * The size of the operational state the cipher needs during operation is
+ * returned for the hash referenced with the cipher handle. This size is
+ * required to calculate the memory requirements to allow the caller allocating
+ * sufficient memory for operational state.
+ *
+ * The operational state is defined with struct shash_desc where the size of
+ * that data structure is to be calculated as
+ * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
+ *
+ * Return: size of the operational state
+ */
 static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
 {
        return tfm->descsize;
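The descsize arithmetic above is exactly how a caller sizes a shash_desc. A minimal sketch (not part of this patch; the helper name is hypothetical):

```c
/* Hypothetical sketch: allocate an operational state for a given shash tfm. */
static struct shash_desc *example_alloc_desc(struct crypto_shash *tfm)
{
	struct shash_desc *desc;

	/* sizeof(struct shash_desc) + crypto_shash_descsize(tfm), as documented */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (desc)
		desc->tfm = tfm;	/* bind the descriptor to the cipher handle */
	return desc;
}
```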
@@ -329,29 +721,129 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
        return desc->__ctx;
 }
 
+/**
+ * crypto_shash_setkey() - set key for message digest
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the keyed message digest cipher. The
+ * cipher handle must point to a keyed message digest cipher in order for this
+ * function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
                        unsigned int keylen);
+
+/**
+ * crypto_shash_digest() - calculate message digest for buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_shash_init,
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *         occurred
+ */
 int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
                        unsigned int len, u8 *out);
 
+/**
+ * crypto_shash_export() - extract operational state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the operational state handle into the
+ * caller-allocated output buffer out which must have sufficient size (e.g. by
+ * calling crypto_shash_descsize).
+ *
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
 static inline int crypto_shash_export(struct shash_desc *desc, void *out)
 {
        return crypto_shash_alg(desc->tfm)->export(desc, out);
 }
 
+/**
+ * crypto_shash_import() - import operational state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the operational state handle from
+ * the input buffer. That buffer should have been generated with the
+ * crypto_shash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
 static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
 {
        return crypto_shash_alg(desc->tfm)->import(desc, in);
 }
 
+/**
+ * crypto_shash_init() - (re)initialize message digest
+ * @desc: operational state handle that is already filled
+ *
+ * The call (re-)initializes the message digest referenced by the
+ * operational state handle. Any potentially existing state created by
+ * previous operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ *         error occurred
+ */
 static inline int crypto_shash_init(struct shash_desc *desc)
 {
        return crypto_shash_alg(desc->tfm)->init(desc);
 }
 
+/**
+ * crypto_shash_update() - add data to message digest for processing
+ * @desc: operational state handle that is already initialized
+ * @data: input data to be added to the message digest
+ * @len: length of the input data
+ *
+ * Updates the message digest state of the operational state handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ *         occurred
+ */
 int crypto_shash_update(struct shash_desc *desc, const u8 *data,
                        unsigned int len);
+
+/**
+ * crypto_shash_final() - calculate message digest
+ * @desc: operational state handle that is already filled with data
+ * @out: output buffer filled with the message digest
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer. The caller must ensure that the output buffer is
+ * large enough by using crypto_shash_digestsize.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *         occurred
+ */
 int crypto_shash_final(struct shash_desc *desc, u8 *out);
+
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *         occurred
+ */
 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
                       unsigned int len, u8 *out);
 
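Taken together, the shash calls above cover the typical keyed-digest flow. A hypothetical sketch computing an HMAC over a flat buffer follows (not part of this patch; it reuses the example_alloc_desc() helper from the crypto_shash_descsize() sketch above, and error handling is abbreviated):

```c
/* Hypothetical sketch: HMAC-SHA256 over a buffer via the shash API. */
static int example_hmac_sha256(const u8 *key, unsigned int keylen,
			       const u8 *data, unsigned int len, u8 *mac)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	desc = example_alloc_desc(tfm);		/* see the descsize sketch above */
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* init + update + final in one call; mac must hold
	 * crypto_shash_digestsize(tfm) bytes */
	err = crypto_shash_digest(desc, data, len, mac);

	kzfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}
```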
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index d61c11170213..cd62bf4289e9 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -42,6 +42,7 @@ struct af_alg_completion {
 struct af_alg_control {
        struct af_alg_iv *iv;
        int op;
+       unsigned int aead_assoclen;
 };
 
 struct af_alg_type {
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index c93f9b917925..a16fb10142bf 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -20,11 +20,38 @@ extern struct crypto_rng *crypto_default_rng;
 int crypto_get_default_rng(void);
 void crypto_put_default_rng(void);
 
+/**
+ * DOC: Random number generator API
+ *
+ * The random number generator API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
+ */
+
 static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
 {
        return (struct crypto_rng *)tfm;
 }
 
+/**
+ * crypto_alloc_rng() - allocate RNG handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *            random number generator
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a random number generator. The returned struct
+ * crypto_rng is the cipher handle that is required for any subsequent
+ * API invocation for that random number generator.
+ *
+ * For all random number generators, this call creates a new private copy of
+ * the random number generator that does not share a state with other
+ * instances. The only exception is the "krng" random number generator which
+ * is a kernel crypto API use case for the get_random_bytes() function of the
+ * /dev/random driver.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *         of an error, PTR_ERR() returns the error code.
+ */
 static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
                                                  u32 type, u32 mask)
 {
@@ -40,6 +67,14 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
        return &tfm->base;
 }
 
+/**
+ * crypto_rng_alg - obtain name of RNG
+ * @tfm: cipher handle
+ *
+ * Return the generic name (cra_name) of the initialized random number
+ * generator.
+ *
+ * Return: generic name string
+ */
 static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
 {
        return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
@@ -50,23 +85,68 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
        return &crypto_rng_tfm(tfm)->crt_rng;
 }
 
+/**
+ * crypto_free_rng() - zeroize and free RNG handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_rng(struct crypto_rng *tfm)
 {
        crypto_free_tfm(crypto_rng_tfm(tfm));
 }
 
+/**
+ * crypto_rng_get_bytes() - get random number
+ * @tfm: cipher handle
+ * @rdata: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using the
+ * random number generator referenced by the cipher handle.
+ *
+ * Return: > 0 function was successful and returns the number of generated
+ *         bytes; < 0 if an error occurred
+ */
 static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
                                       u8 *rdata, unsigned int dlen)
 {
        return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
 }
 
+/**
+ * crypto_rng_reset() - re-initialize the RNG
+ * @tfm: cipher handle
+ * @seed: seed input data
+ * @slen: length of the seed input data
+ *
+ * The reset function completely re-initializes the random number generator
+ * referenced by the cipher handle by clearing the current state. The new state
+ * is initialized with the caller provided seed or automatically, depending
+ * on the random number generator type (the ANSI X9.31 RNG requires a
+ * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
+ * The seed is provided as a parameter to this function call. The provided seed
+ * should have the length of the seed size defined for the random number
+ * generator as defined by crypto_rng_seedsize.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 static inline int crypto_rng_reset(struct crypto_rng *tfm,
                                   u8 *seed, unsigned int slen)
 {
        return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
 }
 
+/**
+ * crypto_rng_seedsize() - obtain seed size of RNG
+ * @tfm: cipher handle
+ *
+ * The function returns the seed size for the random number generator
+ * referenced by the cipher handle. This value may be zero if the random
+ * number generator does not implement or require a reseeding. For example,
+ * the SP800-90A DRBGs implement an automated reseeding after reaching a
+ * pre-defined threshold.
+ *
+ * Return: seed size for the random number generator
+ */
 static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
 {
        return crypto_rng_alg(tfm)->seedsize;
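A hypothetical consumer of this RNG API, allocating a private instance, seeding it and pulling out random bytes (not part of this patch; the "stdrng" algorithm name and the error handling are illustrative):

```c
/* Hypothetical sketch: obtain random bytes from a private RNG instance. */
static int example_get_random(u8 *out, unsigned int outlen,
			      u8 *seed, unsigned int slen)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);	/* private instance, see above */
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* the seed length should match crypto_rng_seedsize(rng) */
	ret = crypto_rng_reset(rng, seed, slen);
	if (ret)
		goto out;

	/* returns the number of generated bytes on success */
	ret = crypto_rng_get_bytes(rng, out, outlen);
	if (ret > 0)
		ret = 0;
out:
	crypto_free_rng(rng);
	return ret;
}
```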
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index d45e949699ea..9c8776d0ada8 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -26,6 +26,19 @@
 #include <linux/uaccess.h>
 
 /*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name)      \
+               __MODULE_INFO(alias, alias_userspace, name);    \
+               __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
+/*
  * Algorithm masks and types.
  */
 #define CRYPTO_ALG_TYPE_MASK           0x0000000f
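For a module implementing, say, a SHA-1 driver, the new macro is used roughly as sketched below (hypothetical driver, shown only to illustrate the prefixed/unprefixed alias pair the macro emits):

```c
/* Hypothetical example: declare crypto aliases for an out-of-tree SHA-1 driver. */
#include <linux/module.h>
#include <linux/crypto.h>

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example SHA-1 driver");
/* Emits both "sha1" (userspace alias) and "crypto-sha1" (autoload alias). */
MODULE_ALIAS_CRYPTO("sha1");
```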
@@ -127,6 +140,13 @@ struct skcipher_givcrypt_request;
 
 typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
 
+/**
+ * DOC: Block Cipher Context Data Structures
+ *
+ * These data structures define the operating context for each block cipher
+ * type.
+ */
+
 struct crypto_async_request {
        struct list_head list;
        crypto_completion_t complete;
@@ -194,9 +214,63 @@ struct hash_desc {
        u32 flags;
 };
 
-/*
- * Algorithms: modular crypto algorithm implementations, managed
- * via crypto_register_alg() and crypto_unregister_alg().
+/**
+ * DOC: Block Cipher Algorithm Definitions
+ *
+ * These data structures define modular crypto algorithm implementations,
+ * managed via crypto_register_alg() and crypto_unregister_alg().
+ */
+
+/**
+ * struct ablkcipher_alg - asynchronous block cipher definition
+ * @min_keysize: Minimum key size supported by the transformation. This is the
+ *               smallest key length supported by this transformation algorithm.
+ *               This must be set to one of the pre-defined values as this is
+ *               not hardware specific. Possible values for this field can be
+ *               found via git grep "_MIN_KEY_SIZE" include/crypto/
+ * @max_keysize: Maximum key size supported by the transformation. This is the
+ *               largest key length supported by this transformation algorithm.
+ *               This must be set to one of the pre-defined values as this is
+ *               not hardware specific. Possible values for this field can be
+ *               found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @setkey: Set key for the transformation. This function is used to either
+ *          program a supplied key into the hardware or store the key in the
+ *          transformation context for programming it later. Note that this
+ *          function does modify the transformation context. This function can
+ *          be called multiple times during the existence of the transformation
+ *          object, so one must make sure the key is properly reprogrammed into
+ *          the hardware. This function is also responsible for checking the key
+ *          length for validity. In case a software fallback was put in place in
+ *          the @cra_init call, this function might need to use the fallback if
+ *          the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
+ *           the supplied scatterlist containing the blocks of data. The crypto
+ *           API consumer is responsible for aligning the entries of the
+ *           scatterlist properly and making sure the chunks are correctly
+ *           sized. In case a software fallback was put in place in the
+ *           @cra_init call, this function might need to use the fallback if
+ *           the algorithm doesn't support all of the key sizes. In case the
+ *           key was stored in transformation context, the key might need to be
+ *           re-programmed into the hardware in this function. This function
+ *           shall not modify the transformation context, as this function may
+ *           be called in parallel with the same transformation object.
+ * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
+ *           and the conditions are exactly the same.
+ * @givencrypt: Update the IV for encryption. With this function, a cipher
+ *              implementation may provide the function on how to update the IV
+ *              for encryption.
+ * @givdecrypt: Update the IV for decryption. This is the reverse of
+ *              @givencrypt .
+ * @geniv: The transformation implementation may use an "IV generator" provided
+ *         by the kernel crypto API. Several use cases have a predefined
+ *         approach how IVs are to be updated. For such use cases, the kernel
+ *         crypto API provides ready-to-use implementations that can be
+ *         referenced with this variable.
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ *          IV of exactly that size to perform the encrypt or decrypt operation.
+ *
+ * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
+ * mandatory and must be filled.
  */
 struct ablkcipher_alg {
        int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -213,6 +287,32 @@ struct ablkcipher_alg {
        unsigned int ivsize;
 };
 
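To make the struct concrete, a driver-side registration might look roughly like the sketch below (not part of this patch; the device name, the example_* callbacks and struct example_ctx are placeholders, and the cra_u.ablkcipher layout reflects the union used by this kernel generation):

```c
/* Hypothetical sketch: registering a hardware CBC-AES ablkcipher. */
static struct crypto_alg example_cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-exampledev",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct example_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,	/* see @min_keysize above */
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,	/* see @ivsize above */
		.setkey		= example_setkey,
		.encrypt	= example_encrypt,
		.decrypt	= example_decrypt,
	},
};

/* Registered with crypto_register_alg(&example_cbc_aes_alg) from probe/init. */
```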
290 | /** | ||
291 | * struct aead_alg - AEAD cipher definition | ||
292 | * @maxauthsize: Set the maximum authentication tag size supported by the | ||
293 | * transformation. A transformation may support smaller tag sizes. | ||
294 | * As the authentication tag is a message digest to ensure the | ||
295 | * integrity of the encrypted data, a consumer typically wants the | ||
296 | * largest authentication tag possible as defined by this | ||
297 | * variable. | ||
298 | * @setauthsize: Set authentication size for the AEAD transformation. This | ||
299 | * function is used to specify the consumer requested size of the | ||
300 | * authentication tag to be either generated by the transformation | ||
301 | * during encryption or the size of the authentication tag to be | ||
302 | * supplied during the decryption operation. This function is also | ||
303 | * responsible for checking the authentication tag size for | ||
304 | * validity. | ||
305 | * @setkey: see struct ablkcipher_alg | ||
306 | * @encrypt: see struct ablkcipher_alg | ||
307 | * @decrypt: see struct ablkcipher_alg | ||
308 | * @givencrypt: see struct ablkcipher_alg | ||
309 | * @givdecrypt: see struct ablkcipher_alg | ||
310 | * @geniv: see struct ablkcipher_alg | ||
311 | * @ivsize: see struct ablkcipher_alg | ||
312 | * | ||
313 | * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are | ||
314 | * mandatory and must be filled. | ||
315 | */ | ||
216 | struct aead_alg { | 316 | struct aead_alg { |
217 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | 317 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, |
218 | unsigned int keylen); | 318 | unsigned int keylen); |
@@ -228,6 +328,18 @@ struct aead_alg { | |||
228 | unsigned int maxauthsize; | 328 | unsigned int maxauthsize; |
229 | }; | 329 | }; |
230 | 330 | ||
331 | /** | ||
332 | * struct blkcipher_alg - synchronous block cipher definition | ||
333 | * @min_keysize: see struct ablkcipher_alg | ||
334 | * @max_keysize: see struct ablkcipher_alg | ||
335 | * @setkey: see struct ablkcipher_alg | ||
336 | * @encrypt: see struct ablkcipher_alg | ||
337 | * @decrypt: see struct ablkcipher_alg | ||
338 | * @geniv: see struct ablkcipher_alg | ||
339 | * @ivsize: see struct ablkcipher_alg | ||
340 | * | ||
341 | * All fields except @geniv and @ivsize are mandatory and must be filled. | ||
342 | */ | ||
231 | struct blkcipher_alg { | 343 | struct blkcipher_alg { |
232 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, | 344 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, |
233 | unsigned int keylen); | 345 | unsigned int keylen); |
@@ -245,6 +357,53 @@ struct blkcipher_alg { | |||
245 | unsigned int ivsize; | 357 | unsigned int ivsize; |
246 | }; | 358 | }; |
247 | 359 | ||
360 | /** | ||
361 | * struct cipher_alg - single-block symmetric ciphers definition | ||
362 | * @cia_min_keysize: Minimum key size supported by the transformation. This is | ||
363 | * the smallest key length supported by this transformation | ||
364 | * algorithm. This must be set to one of the pre-defined | ||
365 | * values as this is not hardware specific. Possible values | ||
366 | * for this field can be found via git grep "_MIN_KEY_SIZE" | ||
367 | * include/crypto/ | ||
368 | * @cia_max_keysize: Maximum key size supported by the transformation. This is | ||
369 | * the largest key length supported by this transformation | ||
370 | * algorithm. This must be set to one of the pre-defined values | ||
371 | * as this is not hardware specific. Possible values for this | ||
372 | * field can be found via git grep "_MAX_KEY_SIZE" | ||
373 | * include/crypto/ | ||
374 | * @cia_setkey: Set key for the transformation. This function is used to either | ||
375 | * program a supplied key into the hardware or store the key in the | ||
376 | * transformation context for programming it later. Note that this | ||
377 | * function does modify the transformation context. This function | ||
378 | * can be called multiple times during the existence of the | ||
379 | * transformation object, so one must make sure the key is properly | ||
380 | * reprogrammed into the hardware. This function is also | ||
381 | * responsible for checking the key length for validity. | ||
382 | * @cia_encrypt: Encrypt a single block. This function is used to encrypt a | ||
383 | * single block of data, which must be @cra_blocksize big. This | ||
384 | * always operates on a full @cra_blocksize and it is not possible | ||
385 | * to encrypt a block of smaller size. The supplied buffers must | ||
386 | * therefore also be at least of @cra_blocksize size. Both the | ||
387 | * input and output buffers are always aligned to @cra_alignmask. | ||
388 | * In case either of the input or output buffer supplied by user | ||
389 | * of the crypto API is not aligned to @cra_alignmask, the crypto | ||
390 | * API will re-align the buffers. The re-alignment means that a | ||
391 | * new buffer will be allocated, the data will be copied into the | ||
392 | * new buffer, then the processing will happen on the new buffer, | ||
393 | * then the data will be copied back into the original buffer and | ||
394 | * finally the new buffer will be freed. In case a software | ||
395 | * fallback was put in place in the @cra_init call, this function | ||
396 | * might need to use the fallback if the algorithm doesn't support | ||
397 | * all of the key sizes. In case the key was stored in | ||
398 | * transformation context, the key might need to be re-programmed | ||
399 | * into the hardware in this function. This function shall not | ||
400 | * modify the transformation context, as this function may be | ||
401 | * called in parallel with the same transformation object. | ||
402 | * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to | ||
403 | * @cia_encrypt, and the conditions are exactly the same. | ||
404 | * | ||
405 | * All fields are mandatory and must be filled. | ||
406 | */ | ||
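As a rough illustration of how these cia_* callbacks are filled in (via the cra_u union of struct crypto_alg described later in this file, reachable through the cra_cipher shortcut); the my_* names, key sizes and block size are hypothetical and the handler functions are not shown:

/* Illustrative sketch only -- a hypothetical single-block cipher. */
static struct crypto_alg my_cipher_alg = {
        .cra_name = "mycipher",
        .cra_driver_name = "mycipher-generic",
        .cra_priority = 100,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize = 16,                    /* one block = 16 bytes */
        .cra_ctxsize = sizeof(struct my_cipher_ctx),
        .cra_module = THIS_MODULE,
        .cra_cipher = {
                .cia_min_keysize = 16,
                .cia_max_keysize = 32,
                .cia_setkey = my_cipher_setkey,
                .cia_encrypt = my_cipher_encrypt,   /* one block at a time */
                .cia_decrypt = my_cipher_decrypt,
        },
};

Note that cia_encrypt()/cia_decrypt() only ever see a single @cra_blocksize block; chaining modes are layered on top by templates such as cbc().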
248 | struct cipher_alg { | 407 | struct cipher_alg { |
249 | unsigned int cia_min_keysize; | 408 | unsigned int cia_min_keysize; |
250 | unsigned int cia_max_keysize; | 409 | unsigned int cia_max_keysize; |
@@ -261,6 +420,25 @@ struct compress_alg { | |||
261 | unsigned int slen, u8 *dst, unsigned int *dlen); | 420 | unsigned int slen, u8 *dst, unsigned int *dlen); |
262 | }; | 421 | }; |
263 | 422 | ||
423 | /** | ||
424 | * struct rng_alg - random number generator definition | ||
425 | * @rng_make_random: The function defined by this variable obtains a random | ||
426 | * number. The random number generator transform must generate | ||
427 | * the random number out of the context provided with this | ||
428 | * call. | ||
429 | * @rng_reset: Reset the random number generator by clearing its entire state. | ||
430 | * With the invocation of this function call, the random number | ||
431 | * generator shall completely reinitialize its state. If the random | ||
432 | * number generator requires a seed for setting up a new state, | ||
433 | * the seed must be provided by the consumer while invoking this | ||
434 | * function. The required size of the seed is defined with | ||
435 | * @seedsize. | ||
436 | * @seedsize: The seed size required for a random number generator | ||
437 | * initialization is defined with this variable. Some random number | ||
438 | * generators, like the SP800-90A DRBG, do not require a seed, as the | ||
439 | * seeding is implemented internally without the need for support by | ||
440 | * the consumer. In this case, the seed size is set to zero. | ||
441 | */ | ||
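A hedged sketch of how an RNG provider might fill these fields through the cra_rng shortcut defined a few lines below; the my_* names and the seed size are invented and the handler functions are omitted:

/* Illustrative sketch only -- a hypothetical RNG implementation. */
static struct crypto_alg my_rng_alg = {
        .cra_name = "stdrng",
        .cra_driver_name = "my_prng",
        .cra_priority = 100,
        .cra_flags = CRYPTO_ALG_TYPE_RNG,
        .cra_ctxsize = sizeof(struct my_rng_ctx),
        .cra_type = &crypto_rng_type,
        .cra_module = THIS_MODULE,
        .cra_rng = {
                .rng_make_random = my_rng_make_random,
                .rng_reset = my_rng_reset,
                .seedsize = 48,        /* 0 for self-seeding generators */
        },
};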
264 | struct rng_alg { | 442 | struct rng_alg { |
265 | int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, | 443 | int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, |
266 | unsigned int dlen); | 444 | unsigned int dlen); |
@@ -277,6 +455,81 @@ struct rng_alg { | |||
277 | #define cra_compress cra_u.compress | 455 | #define cra_compress cra_u.compress |
278 | #define cra_rng cra_u.rng | 456 | #define cra_rng cra_u.rng |
279 | 457 | ||
458 | /** | ||
459 | * struct crypto_alg - definition of a cryptographic cipher algorithm | ||
460 | * @cra_flags: Flags describing this transformation. See include/linux/crypto.h | ||
461 | * CRYPTO_ALG_* flags for the flags which go in here. Those are | ||
462 | * used for fine-tuning the description of the transformation | ||
463 | * algorithm. | ||
464 | * @cra_blocksize: Minimum block size of this transformation. The size in bytes | ||
465 | * of the smallest possible unit which can be transformed with | ||
466 | * this algorithm. The users must respect this value. | ||
467 | * In case of a hash transformation, it is possible for a smaller | ||
468 | * block than @cra_blocksize to be passed to the crypto API for | ||
469 | * transformation; for any other transformation type, an error | ||
470 | * will be returned upon any attempt to transform chunks smaller | ||
471 | * than @cra_blocksize. | ||
472 | * @cra_ctxsize: Size of the operational context of the transformation. This | ||
473 | * value informs the kernel crypto API about the memory size | ||
474 | * needed to be allocated for the transformation context. | ||
475 | * @cra_alignmask: Alignment mask for the input and output data buffer. The data | ||
476 | * buffer containing the input data for the algorithm must be | ||
477 | * aligned to this alignment mask. The data buffer for the | ||
478 | * output data must be aligned to this alignment mask. Note that | ||
479 | * the Crypto API will do the re-alignment in software, but | ||
480 | * only under special conditions and there is a performance hit. | ||
481 | * The re-alignment happens at these occasions for different | ||
482 | * @cra_u types: cipher -- For both input data and output data | ||
483 | * buffer; ahash -- For output hash destination buf; shash -- | ||
484 | * For output hash destination buf. | ||
485 | * This is needed on hardware which is flawed by design and | ||
486 | * cannot pick data from arbitrary addresses. | ||
487 | * @cra_priority: Priority of this transformation implementation. In case | ||
488 | * multiple transformations with same @cra_name are available to | ||
489 | * the Crypto API, the kernel will use the one with highest | ||
490 | * @cra_priority. | ||
491 | * @cra_name: Generic name (usable by multiple implementations) of the | ||
492 | * transformation algorithm. This is the name of the transformation | ||
493 | * itself. This field is used by the kernel when looking up the | ||
494 | * providers of a particular transformation. | ||
495 | * @cra_driver_name: Unique name of the transformation provider. This is the | ||
496 | * name of the provider of the transformation. This can be any | ||
497 | * arbitrary value, but in the usual case, this contains the | ||
498 | * name of the chip or provider and the name of the | ||
499 | * transformation algorithm. | ||
500 | * @cra_type: Type of the cryptographic transformation. This is a pointer to | ||
501 | * struct crypto_type, which implements callbacks common for all | ||
502 | * transformation types. There are multiple options: | ||
503 | * &crypto_blkcipher_type, &crypto_ablkcipher_type, | ||
504 | * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. | ||
505 | * This field might be empty. In that case, there are no common | ||
506 | * callbacks. This is the case for: cipher, compress, shash. | ||
507 | * @cra_u: Callbacks implementing the transformation. This is a union of | ||
508 | * multiple structures. Depending on the type of transformation selected | ||
509 | * by @cra_type and @cra_flags above, the associated structure must be | ||
510 | * filled with callbacks. This field might be empty. This is the case | ||
511 | * for ahash, shash. | ||
512 | * @cra_init: Initialize the cryptographic transformation object. This function | ||
513 | * is used to initialize the cryptographic transformation object. | ||
514 | * This function is called only once at the instantiation time, right | ||
515 | * after the transformation context was allocated. In case the | ||
516 | * cryptographic hardware has some special requirements which need to | ||
517 | * be handled by software, this function shall check for the precise | ||
518 | * requirement of the transformation and put any software fallbacks | ||
519 | * in place. | ||
520 | * @cra_exit: Deinitialize the cryptographic transformation object. This is a | ||
521 | * counterpart to @cra_init, used to remove various changes set in | ||
522 | * @cra_init. | ||
523 | * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE | ||
524 | * @cra_list: internally used | ||
525 | * @cra_users: internally used | ||
526 | * @cra_refcnt: internally used | ||
527 | * @cra_destroy: internally used | ||
528 | * | ||
529 | * The struct crypto_alg describes a generic Crypto API algorithm and is common | ||
530 | * for all of the transformations. Any variable not documented here shall not | ||
531 | * be used by a cipher implementation as it is internal to the Crypto API. | ||
532 | */ | ||
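To tie the pieces together, a minimal, hypothetical module skeleton that registers such a filled-in struct crypto_alg with crypto_register_alg() (declared elsewhere in this header) and removes it again on unload; my_cipher_alg stands for one of the sketches earlier in this section:

static int __init my_crypto_mod_init(void)
{
        /* Makes the algorithm reachable via its cra_name / cra_driver_name. */
        return crypto_register_alg(&my_cipher_alg);
}

static void __exit my_crypto_mod_exit(void)
{
        crypto_unregister_alg(&my_cipher_alg);
}

module_init(my_crypto_mod_init);
module_exit(my_crypto_mod_exit);
MODULE_LICENSE("GPL");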
280 | struct crypto_alg { | 533 | struct crypto_alg { |
281 | struct list_head cra_list; | 534 | struct list_head cra_list; |
282 | struct list_head cra_users; | 535 | struct list_head cra_users; |
@@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask) | |||
581 | return mask; | 834 | return mask; |
582 | } | 835 | } |
583 | 836 | ||
837 | /** | ||
838 | * DOC: Asynchronous Block Cipher API | ||
839 | * | ||
840 | * Asynchronous block cipher API is used with the ciphers of type | ||
841 | * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). | ||
842 | * | ||
843 | * Asynchronous cipher operations imply that the function invocation for a | ||
844 | * cipher request returns immediately before the completion of the operation. | ||
845 | * The cipher request is scheduled as a separate kernel thread and therefore | ||
846 | * load-balanced on the different CPUs via the process scheduler. To allow | ||
847 | * the kernel crypto API to inform the caller about the completion of a cipher | ||
848 | * request, the caller must provide a callback function. That function is | ||
849 | * invoked with the cipher handle when the request completes. | ||
850 | * | ||
851 | * To support the asynchronous operation, more information than just the | ||
852 | * cipher handle must be supplied to the kernel crypto API. That additional | ||
853 | * information is given by filling in the ablkcipher_request data structure. | ||
854 | * | ||
855 | * For the asynchronous block cipher API, the state is maintained with the tfm | ||
856 | * cipher handle. A single tfm can be used across multiple calls and in | ||
857 | * parallel. For asynchronous block cipher calls, context data supplied and | ||
858 | * only used by the caller can be referenced in the request data structure in | ||
859 | * addition to the IV used for the cipher request. The maintenance of such | ||
860 | * state information would be important for a crypto driver implementer to | ||
861 | * have, because when calling the callback function upon completion of the | ||
862 | * cipher operation, that callback function may need some information about | ||
863 | * which operation just finished if it invoked multiple operations in | ||
864 | * parallel. This state information is unused by the kernel crypto API. | ||
865 | */ | ||
866 | |||
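A hedged, caller-side sketch of the flow described above: all my_* names are invented, error paths are abbreviated, and the "cbc(aes)" choice is just an example. The callback completes a completion so the caller can wait for the asynchronous request:

struct my_result {
        struct completion completion;
        int err;
};

/* Completion callback matching the template documented further below. */
static void my_cipher_done(struct crypto_async_request *req, int error)
{
        struct my_result *res = req->data;

        if (error == -EINPROGRESS)
                return;        /* a backlogged request has started; keep waiting */

        res->err = error;
        complete(&res->completion);
}

/* One asynchronous encryption, waited for synchronously. */
static int my_encrypt_once(struct scatterlist *src, struct scatterlist *dst,
                           unsigned int nbytes, const u8 *key,
                           unsigned int keylen, void *iv)
{
        struct crypto_ablkcipher *tfm;
        struct ablkcipher_request *req;
        struct my_result res;
        int ret;

        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_ablkcipher_setkey(tfm, key, keylen);
        if (ret)
                goto out_tfm;

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }

        init_completion(&res.completion);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        my_cipher_done, &res);
        /* iv must be crypto_ablkcipher_ivsize(tfm) bytes long. */
        ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&res.completion);
                ret = res.err;
        }

        ablkcipher_request_free(req);
out_tfm:
        crypto_free_ablkcipher(tfm);
        return ret;
}

The same skeleton works for decryption by calling crypto_ablkcipher_decrypt() instead.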
867 | /** | ||
868 | * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle | ||
869 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
870 | * ablkcipher cipher | ||
871 | * @type: specifies the type of the cipher | ||
872 | * @mask: specifies the mask for the cipher | ||
873 | * | ||
874 | * Allocate a cipher handle for an ablkcipher. The returned struct | ||
875 | * crypto_ablkcipher is the cipher handle that is required for any subsequent | ||
876 | * API invocation for that ablkcipher. | ||
877 | * | ||
878 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
879 | * of an error, PTR_ERR() returns the error code. | ||
880 | */ | ||
584 | struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, | 881 | struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, |
585 | u32 type, u32 mask); | 882 | u32 type, u32 mask); |
586 | 883 | ||
@@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm( | |||
590 | return &tfm->base; | 887 | return &tfm->base; |
591 | } | 888 | } |
592 | 889 | ||
890 | /** | ||
891 | * crypto_free_ablkcipher() - zeroize and free cipher handle | ||
892 | * @tfm: cipher handle to be freed | ||
893 | */ | ||
593 | static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) | 894 | static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) |
594 | { | 895 | { |
595 | crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); | 896 | crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); |
596 | } | 897 | } |
597 | 898 | ||
899 | /** | ||
900 | * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. | ||
901 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
902 | * ablkcipher | ||
903 | * @type: specifies the type of the cipher | ||
904 | * @mask: specifies the mask for the cipher | ||
905 | * | ||
906 | * Return: true when the ablkcipher is known to the kernel crypto API; false | ||
907 | * otherwise | ||
908 | */ | ||
598 | static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, | 909 | static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, |
599 | u32 mask) | 910 | u32 mask) |
600 | { | 911 | { |
@@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( | |||
608 | return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; | 919 | return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; |
609 | } | 920 | } |
610 | 921 | ||
922 | /** | ||
923 | * crypto_ablkcipher_ivsize() - obtain IV size | ||
924 | * @tfm: cipher handle | ||
925 | * | ||
926 | * The size of the IV for the ablkcipher referenced by the cipher handle is | ||
927 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
928 | * | ||
929 | * Return: IV size in bytes | ||
930 | */ | ||
611 | static inline unsigned int crypto_ablkcipher_ivsize( | 931 | static inline unsigned int crypto_ablkcipher_ivsize( |
612 | struct crypto_ablkcipher *tfm) | 932 | struct crypto_ablkcipher *tfm) |
613 | { | 933 | { |
614 | return crypto_ablkcipher_crt(tfm)->ivsize; | 934 | return crypto_ablkcipher_crt(tfm)->ivsize; |
615 | } | 935 | } |
616 | 936 | ||
937 | /** | ||
938 | * crypto_ablkcipher_blocksize() - obtain block size of cipher | ||
939 | * @tfm: cipher handle | ||
940 | * | ||
941 | * The block size for the ablkcipher referenced with the cipher handle is | ||
942 | * returned. The caller may use that information to allocate appropriate | ||
943 | * memory for the data returned by the encryption or decryption operation. | ||
944 | * | ||
945 | * Return: block size of cipher | ||
946 | */ | ||
617 | static inline unsigned int crypto_ablkcipher_blocksize( | 947 | static inline unsigned int crypto_ablkcipher_blocksize( |
618 | struct crypto_ablkcipher *tfm) | 948 | struct crypto_ablkcipher *tfm) |
619 | { | 949 | { |
@@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, | |||
643 | crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); | 973 | crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); |
644 | } | 974 | } |
645 | 975 | ||
976 | /** | ||
977 | * crypto_ablkcipher_setkey() - set key for cipher | ||
978 | * @tfm: cipher handle | ||
979 | * @key: buffer holding the key | ||
980 | * @keylen: length of the key in bytes | ||
981 | * | ||
982 | * The caller provided key is set for the ablkcipher referenced by the cipher | ||
983 | * handle. | ||
984 | * | ||
985 | * Note, the key length determines the cipher type. Many block ciphers come in | ||
986 | * different variants depending on the key size, such as AES-128 vs. AES-192 | ||
987 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
988 | * is performed. | ||
989 | * | ||
990 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
991 | */ | ||
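For illustration, a minimal hedged sketch (hypothetical helper with an all-zero placeholder key, not a usable key), where the keylen argument alone selects the AES variant:

/* Hypothetical helper: keylen alone picks AES-128 or AES-256. */
static int my_set_placeholder_key(struct crypto_ablkcipher *tfm, bool use_aes256)
{
        static const u8 key[32];        /* zero-filled placeholder */

        return crypto_ablkcipher_setkey(tfm, key, use_aes256 ? 32 : 16);
}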
646 | static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | 992 | static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, |
647 | const u8 *key, unsigned int keylen) | 993 | const u8 *key, unsigned int keylen) |
648 | { | 994 | { |
@@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | |||
651 | return crt->setkey(crt->base, key, keylen); | 997 | return crt->setkey(crt->base, key, keylen); |
652 | } | 998 | } |
653 | 999 | ||
1000 | /** | ||
1001 | * crypto_ablkcipher_reqtfm() - obtain cipher handle from request | ||
1002 | * @req: ablkcipher_request out of which the cipher handle is to be obtained | ||
1003 | * | ||
1004 | * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request | ||
1005 | * data structure. | ||
1006 | * | ||
1007 | * Return: crypto_ablkcipher handle | ||
1008 | */ | ||
654 | static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( | 1009 | static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( |
655 | struct ablkcipher_request *req) | 1010 | struct ablkcipher_request *req) |
656 | { | 1011 | { |
657 | return __crypto_ablkcipher_cast(req->base.tfm); | 1012 | return __crypto_ablkcipher_cast(req->base.tfm); |
658 | } | 1013 | } |
659 | 1014 | ||
1015 | /** | ||
1016 | * crypto_ablkcipher_encrypt() - encrypt plaintext | ||
1017 | * @req: reference to the ablkcipher_request handle that holds all information | ||
1018 | * needed to perform the cipher operation | ||
1019 | * | ||
1020 | * Encrypt plaintext data using the ablkcipher_request handle. That data | ||
1021 | * structure and how it is filled with data is discussed with the | ||
1022 | * ablkcipher_request_* functions. | ||
1023 | * | ||
1024 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1025 | */ | ||
660 | static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) | 1026 | static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) |
661 | { | 1027 | { |
662 | struct ablkcipher_tfm *crt = | 1028 | struct ablkcipher_tfm *crt = |
@@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) | |||
664 | return crt->encrypt(req); | 1030 | return crt->encrypt(req); |
665 | } | 1031 | } |
666 | 1032 | ||
1033 | /** | ||
1034 | * crypto_ablkcipher_decrypt() - decrypt ciphertext | ||
1035 | * @req: reference to the ablkcipher_request handle that holds all information | ||
1036 | * needed to perform the cipher operation | ||
1037 | * | ||
1038 | * Decrypt ciphertext data using the ablkcipher_request handle. That data | ||
1039 | * structure and how it is filled with data is discussed with the | ||
1040 | * ablkcipher_request_* functions. | ||
1041 | * | ||
1042 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1043 | */ | ||
667 | static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) | 1044 | static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) |
668 | { | 1045 | { |
669 | struct ablkcipher_tfm *crt = | 1046 | struct ablkcipher_tfm *crt = |
@@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) | |||
671 | return crt->decrypt(req); | 1048 | return crt->decrypt(req); |
672 | } | 1049 | } |
673 | 1050 | ||
1051 | /** | ||
1052 | * DOC: Asynchronous Cipher Request Handle | ||
1053 | * | ||
1054 | * The ablkcipher_request data structure contains all pointers to data | ||
1055 | * required for the asynchronous cipher operation. This includes the cipher | ||
1056 | * handle (which can be used by multiple ablkcipher_request instances), pointer | ||
1057 | * to plaintext and ciphertext, asynchronous callback function, etc. It acts | ||
1058 | * as a handle to the ablkcipher_request_* API calls in a similar way as | ||
1059 | * the ablkcipher handle does to the crypto_ablkcipher_* API calls. | ||
1060 | */ | ||
1061 | |||
1062 | /** | ||
1063 | * crypto_ablkcipher_reqsize() - obtain size of the request data structure | ||
1064 | * @tfm: cipher handle | ||
1065 | * | ||
1066 | * Return: number of bytes | ||
1067 | */ | ||
674 | static inline unsigned int crypto_ablkcipher_reqsize( | 1068 | static inline unsigned int crypto_ablkcipher_reqsize( |
675 | struct crypto_ablkcipher *tfm) | 1069 | struct crypto_ablkcipher *tfm) |
676 | { | 1070 | { |
677 | return crypto_ablkcipher_crt(tfm)->reqsize; | 1071 | return crypto_ablkcipher_crt(tfm)->reqsize; |
678 | } | 1072 | } |
679 | 1073 | ||
1074 | /** | ||
1075 | * ablkcipher_request_set_tfm() - update cipher handle reference in request | ||
1076 | * @req: request handle to be modified | ||
1077 | * @tfm: cipher handle that shall be added to the request handle | ||
1078 | * | ||
1079 | * Allow the caller to replace the existing ablkcipher handle in the request | ||
1080 | * data structure with a different one. | ||
1081 | */ | ||
680 | static inline void ablkcipher_request_set_tfm( | 1082 | static inline void ablkcipher_request_set_tfm( |
681 | struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) | 1083 | struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) |
682 | { | 1084 | { |
@@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast( | |||
689 | return container_of(req, struct ablkcipher_request, base); | 1091 | return container_of(req, struct ablkcipher_request, base); |
690 | } | 1092 | } |
691 | 1093 | ||
1094 | /** | ||
1095 | * ablkcipher_request_alloc() - allocate request data structure | ||
1096 | * @tfm: cipher handle to be registered with the request | ||
1097 | * @gfp: memory allocation flag that is handed to kmalloc by the API call. | ||
1098 | * | ||
1099 | * Allocate the request data structure that must be used with the ablkcipher | ||
1100 | * encrypt and decrypt API calls. During the allocation, the provided ablkcipher | ||
1101 | * handle is registered in the request data structure. | ||
1102 | * | ||
1103 | * Return: allocated request handle in case of success, or NULL in case of | ||
1104 | * an out-of-memory condition. | ||
1105 | */ | ||
692 | static inline struct ablkcipher_request *ablkcipher_request_alloc( | 1106 | static inline struct ablkcipher_request *ablkcipher_request_alloc( |
693 | struct crypto_ablkcipher *tfm, gfp_t gfp) | 1107 | struct crypto_ablkcipher *tfm, gfp_t gfp) |
694 | { | 1108 | { |
@@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc( | |||
703 | return req; | 1117 | return req; |
704 | } | 1118 | } |
705 | 1119 | ||
1120 | /** | ||
1121 | * ablkcipher_request_free() - zeroize and free request data structure | ||
1122 | * @req: request data structure cipher handle to be freed | ||
1123 | */ | ||
706 | static inline void ablkcipher_request_free(struct ablkcipher_request *req) | 1124 | static inline void ablkcipher_request_free(struct ablkcipher_request *req) |
707 | { | 1125 | { |
708 | kzfree(req); | 1126 | kzfree(req); |
709 | } | 1127 | } |
710 | 1128 | ||
1129 | /** | ||
1130 | * ablkcipher_request_set_callback() - set asynchronous callback function | ||
1131 | * @req: request handle | ||
1132 | * @flags: specify zero or an ORing of the flags | ||
1133 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and | ||
1134 | * increase the wait queue beyond the initial maximum size; | ||
1135 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | ||
1136 | * @compl: callback function pointer to be registered with the request handle | ||
1137 | * @data: The data pointer refers to memory that is not used by the kernel | ||
1138 | * crypto API, but provided to the callback function for it to use. Here, | ||
1139 | * the caller can provide a reference to memory the callback function can | ||
1140 | * operate on. As the callback function is invoked asynchronously to the | ||
1141 | * related functionality, it may need to access data structures of the | ||
1142 | * related functionality which can be referenced using this pointer. The | ||
1143 | * callback function can access the memory via the "data" field in the | ||
1144 | * crypto_async_request data structure provided to the callback function. | ||
1145 | * | ||
1146 | * This function allows setting the callback function that is triggered once the | ||
1147 | * cipher operation completes. | ||
1148 | * | ||
1149 | * The callback function is registered with the ablkcipher_request handle and | ||
1150 | * must comply with the following template: | ||
1151 | * | ||
1152 | * void callback_function(struct crypto_async_request *req, int error) | ||
1153 | */ | ||
711 | static inline void ablkcipher_request_set_callback( | 1154 | static inline void ablkcipher_request_set_callback( |
712 | struct ablkcipher_request *req, | 1155 | struct ablkcipher_request *req, |
713 | u32 flags, crypto_completion_t compl, void *data) | 1156 | u32 flags, crypto_completion_t compl, void *data) |
@@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback( | |||
717 | req->base.flags = flags; | 1160 | req->base.flags = flags; |
718 | } | 1161 | } |
719 | 1162 | ||
1163 | /** | ||
1164 | * ablkcipher_request_set_crypt() - set data buffers | ||
1165 | * @req: request handle | ||
1166 | * @src: source scatter / gather list | ||
1167 | * @dst: destination scatter / gather list | ||
1168 | * @nbytes: number of bytes to process from @src | ||
1169 | * @iv: IV for the cipher operation which must comply with the IV size defined | ||
1170 | * by crypto_ablkcipher_ivsize | ||
1171 | * | ||
1172 | * This function allows setting of the source data and destination data | ||
1173 | * scatter / gather lists. | ||
1174 | * | ||
1175 | * For encryption, the source is treated as the plaintext and the | ||
1176 | * destination is the ciphertext. For a decryption operation, the use is | ||
1177 | * reversed: the source is the ciphertext and the destination is the plaintext. | ||
1178 | */ | ||
720 | static inline void ablkcipher_request_set_crypt( | 1179 | static inline void ablkcipher_request_set_crypt( |
721 | struct ablkcipher_request *req, | 1180 | struct ablkcipher_request *req, |
722 | struct scatterlist *src, struct scatterlist *dst, | 1181 | struct scatterlist *src, struct scatterlist *dst, |
@@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt( | |||
728 | req->info = iv; | 1187 | req->info = iv; |
729 | } | 1188 | } |
730 | 1189 | ||
1190 | /** | ||
1191 | * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API | ||
1192 | * | ||
1193 | * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD | ||
1194 | * (listed as type "aead" in /proc/crypto) | ||
1195 | * | ||
1196 | * The most prominent examples for this type of encryption are GCM and CCM. | ||
1197 | * However, the kernel supports other types of AEAD ciphers which are defined | ||
1198 | * with the following cipher string: | ||
1199 | * | ||
1200 | * authenc(keyed message digest, block cipher) | ||
1201 | * | ||
1202 | * For example: authenc(hmac(sha256), cbc(aes)) | ||
1203 | * | ||
1204 | * The example code provided for the asynchronous block cipher operation | ||
1205 | * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged | ||
1206 | * for the *aead* pendants discussed in the following. In addition, for the AEAD | ||
1207 | * operation, the aead_request_set_assoc function must be used to set the | ||
1208 | * pointer to the associated data memory location before performing the | ||
1209 | * encryption or decryption operation. In case of an encryption, the associated | ||
1210 | * data memory is filled during the encryption operation. For decryption, the | ||
1211 | * associated data memory must contain data that is used to verify the integrity | ||
1212 | * of the decrypted data. Another deviation from the asynchronous block cipher | ||
1213 | * operation is that the caller should explicitly check for -EBADMSG returned | ||
1214 | * by crypto_aead_decrypt. That error indicates an authentication error, i.e. | ||
1215 | * a breach in the integrity of the message. In essence, that -EBADMSG error | ||
1216 | * code is the key bonus an AEAD cipher has over "standard" block chaining | ||
1217 | * modes. | ||
1218 | */ | ||
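A hedged end-to-end sketch of one AEAD encryption under the interface documented here (associated data set via aead_request_set_assoc); my_* names are invented, error handling is abbreviated, and a synchronous implementation is requested so that no completion callback is needed:

static int my_gcm_encrypt_once(const u8 *key, unsigned int keylen,
                               void *assoc_buf, unsigned int assoclen,
                               void *ptbuf, unsigned int ptlen, u8 *iv)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct scatterlist sg, asg;
        const unsigned int authsize = 16;       /* full GCM tag */
        int ret;

        /* Mask out asynchronous implementations: no callback needed then. */
        tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_aead_setauthsize(tfm, authsize);
        if (!ret)
                ret = crypto_aead_setkey(tfm, key, keylen);
        if (ret)
                goto out_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }

        /* ptbuf must have room for ptlen bytes plus the authentication tag;
         * iv must be crypto_aead_ivsize(tfm) bytes long. */
        sg_init_one(&sg, ptbuf, ptlen + authsize);
        sg_init_one(&asg, assoc_buf, assoclen);

        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        aead_request_set_assoc(req, &asg, assoclen);
        aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

        ret = crypto_aead_encrypt(req);

        aead_request_free(req);
out_tfm:
        crypto_free_aead(tfm);
        return ret;
}

For decryption, the source list holds the ciphertext followed by the tag and the length passed to aead_request_set_crypt() covers both, as the note to that function below explains; a return value of -EBADMSG then signals an authentication failure.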
1219 | |||
731 | static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) | 1220 | static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) |
732 | { | 1221 | { |
733 | return (struct crypto_aead *)tfm; | 1222 | return (struct crypto_aead *)tfm; |
734 | } | 1223 | } |
735 | 1224 | ||
1225 | /** | ||
1226 | * crypto_alloc_aead() - allocate AEAD cipher handle | ||
1227 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1228 | * AEAD cipher | ||
1229 | * @type: specifies the type of the cipher | ||
1230 | * @mask: specifies the mask for the cipher | ||
1231 | * | ||
1232 | * Allocate a cipher handle for an AEAD. The returned struct | ||
1233 | * crypto_aead is the cipher handle that is required for any subsequent | ||
1234 | * API invocation for that AEAD. | ||
1235 | * | ||
1236 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
1237 | * of an error, PTR_ERR() returns the error code. | ||
1238 | */ | ||
736 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); | 1239 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); |
737 | 1240 | ||
738 | static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) | 1241 | static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) |
@@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) | |||
740 | return &tfm->base; | 1243 | return &tfm->base; |
741 | } | 1244 | } |
742 | 1245 | ||
1246 | /** | ||
1247 | * crypto_free_aead() - zeroize and free aead handle | ||
1248 | * @tfm: cipher handle to be freed | ||
1249 | */ | ||
743 | static inline void crypto_free_aead(struct crypto_aead *tfm) | 1250 | static inline void crypto_free_aead(struct crypto_aead *tfm) |
744 | { | 1251 | { |
745 | crypto_free_tfm(crypto_aead_tfm(tfm)); | 1252 | crypto_free_tfm(crypto_aead_tfm(tfm)); |
@@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm) | |||
750 | return &crypto_aead_tfm(tfm)->crt_aead; | 1257 | return &crypto_aead_tfm(tfm)->crt_aead; |
751 | } | 1258 | } |
752 | 1259 | ||
1260 | /** | ||
1261 | * crypto_aead_ivsize() - obtain IV size | ||
1262 | * @tfm: cipher handle | ||
1263 | * | ||
1264 | * The size of the IV for the aead referenced by the cipher handle is | ||
1265 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
1266 | * | ||
1267 | * Return: IV size in bytes | ||
1268 | */ | ||
753 | static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) | 1269 | static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) |
754 | { | 1270 | { |
755 | return crypto_aead_crt(tfm)->ivsize; | 1271 | return crypto_aead_crt(tfm)->ivsize; |
756 | } | 1272 | } |
757 | 1273 | ||
1274 | /** | ||
1275 | * crypto_aead_authsize() - obtain maximum authentication data size | ||
1276 | * @tfm: cipher handle | ||
1277 | * | ||
1278 | * The maximum size of the authentication data for the AEAD cipher referenced | ||
1279 | * by the AEAD cipher handle is returned. The authentication data size may be | ||
1280 | * zero if the cipher implements a hard-coded maximum. | ||
1281 | * | ||
1282 | * The authentication data may also be known as "tag value". | ||
1283 | * | ||
1284 | * Return: authentication data size / tag size in bytes | ||
1285 | */ | ||
758 | static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) | 1286 | static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) |
759 | { | 1287 | { |
760 | return crypto_aead_crt(tfm)->authsize; | 1288 | return crypto_aead_crt(tfm)->authsize; |
761 | } | 1289 | } |
762 | 1290 | ||
1291 | /** | ||
1292 | * crypto_aead_blocksize() - obtain block size of cipher | ||
1293 | * @tfm: cipher handle | ||
1294 | * | ||
1295 | * The block size for the AEAD referenced with the cipher handle is returned. | ||
1296 | * The caller may use that information to allocate appropriate memory for the | ||
1297 | * data returned by the encryption or decryption operation. | ||
1298 | * | ||
1299 | * Return: block size of cipher | ||
1300 | */ | ||
763 | static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) | 1301 | static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) |
764 | { | 1302 | { |
765 | return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); | 1303 | return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); |
@@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) | |||
785 | crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); | 1323 | crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); |
786 | } | 1324 | } |
787 | 1325 | ||
1326 | /** | ||
1327 | * crypto_aead_setkey() - set key for cipher | ||
1328 | * @tfm: cipher handle | ||
1329 | * @key: buffer holding the key | ||
1330 | * @keylen: length of the key in bytes | ||
1331 | * | ||
1332 | * The caller provided key is set for the AEAD referenced by the cipher | ||
1333 | * handle. | ||
1334 | * | ||
1335 | * Note, the key length determines the cipher type. Many block ciphers come in | ||
1336 | * different variants depending on the key size, such as AES-128 vs. AES-192 | ||
1337 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
1338 | * is performed. | ||
1339 | * | ||
1340 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
1341 | */ | ||
788 | static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, | 1342 | static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
789 | unsigned int keylen) | 1343 | unsigned int keylen) |
790 | { | 1344 | { |
@@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
793 | return crt->setkey(crt->base, key, keylen); | 1347 | return crt->setkey(crt->base, key, keylen); |
794 | } | 1348 | } |
795 | 1349 | ||
1350 | /** | ||
1351 | * crypto_aead_setauthsize() - set authentication data size | ||
1352 | * @tfm: cipher handle | ||
1353 | * @authsize: size of the authentication data / tag in bytes | ||
1354 | * | ||
1355 | * Set the authentication data size / tag size. AEAD requires an authentication | ||
1356 | * tag (or MAC) in addition to the associated data. | ||
1357 | * | ||
1358 | * Return: 0 if the authentication size was set successfully; < 0 if an error occurred | ||
1359 | */ | ||
796 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); | 1360 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); |
797 | 1361 | ||
798 | static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | 1362 | static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) |
@@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | |||
800 | return __crypto_aead_cast(req->base.tfm); | 1364 | return __crypto_aead_cast(req->base.tfm); |
801 | } | 1365 | } |
802 | 1366 | ||
1367 | /** | ||
1368 | * crypto_aead_encrypt() - encrypt plaintext | ||
1369 | * @req: reference to the aead_request handle that holds all information | ||
1370 | * needed to perform the cipher operation | ||
1371 | * | ||
1372 | * Encrypt plaintext data using the aead_request handle. That data structure | ||
1373 | * and how it is filled with data is discussed with the aead_request_* | ||
1374 | * functions. | ||
1375 | * | ||
1376 | * IMPORTANT NOTE: The encryption operation creates the authentication data / | ||
1377 | * tag. That data is concatenated with the created ciphertext. | ||
1378 | * The ciphertext memory size is therefore the given number of | ||
1379 | * block cipher blocks + the size defined by the | ||
1380 | * crypto_aead_setauthsize invocation. The caller must ensure | ||
1381 | * that sufficient memory is available for the ciphertext and | ||
1382 | * the authentication tag. | ||
1383 | * | ||
1384 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1385 | */ | ||
803 | static inline int crypto_aead_encrypt(struct aead_request *req) | 1386 | static inline int crypto_aead_encrypt(struct aead_request *req) |
804 | { | 1387 | { |
805 | return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); | 1388 | return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); |
806 | } | 1389 | } |
807 | 1390 | ||
1391 | /** | ||
1392 | * crypto_aead_decrypt() - decrypt ciphertext | ||
1393 | * @req: reference to the aead_request handle that holds all information | ||
1394 | * needed to perform the cipher operation | ||
1395 | * | ||
1396 | * Decrypt ciphertext data using the aead_request handle. That data structure | ||
1397 | * and how it is filled with data is discussed with the aead_request_* | ||
1398 | * functions. | ||
1399 | * | ||
1400 | * IMPORTANT NOTE: The caller must concatenate the ciphertext followed by the | ||
1401 | * authentication data / tag. That authentication data / tag | ||
1402 | * must have the size defined by the crypto_aead_setauthsize | ||
1403 | * invocation. | ||
1404 | * | ||
1405 | * | ||
1406 | * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD | ||
1407 | * cipher operation performs the authentication of the data during the | ||
1408 | * decryption operation. Therefore, the function returns this error if | ||
1409 | * the authentication of the ciphertext was unsuccessful (i.e. the | ||
1410 | * integrity of the ciphertext or the associated data was violated); | ||
1411 | * < 0 if an error occurred. | ||
1412 | */ | ||
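A small hedged fragment showing the -EBADMSG convention; the wrapper and the kernel log message are hypothetical:

/* Sketch: distinguish an authentication failure from other errors. */
static int my_aead_decrypt_checked(struct aead_request *req)
{
        int ret = crypto_aead_decrypt(req);

        if (ret == -EBADMSG)
                pr_warn("my_aead: authentication failed, discarding plaintext\n");

        return ret;
}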
808 | static inline int crypto_aead_decrypt(struct aead_request *req) | 1413 | static inline int crypto_aead_decrypt(struct aead_request *req) |
809 | { | 1414 | { |
810 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); | 1415 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); |
811 | } | 1416 | } |
812 | 1417 | ||
1418 | /** | ||
1419 | * DOC: Asynchronous AEAD Request Handle | ||
1420 | * | ||
1421 | * The aead_request data structure contains all pointers to data required for | ||
1422 | * the AEAD cipher operation. This includes the cipher handle (which can be | ||
1423 | * used by multiple aead_request instances), pointer to plaintext and | ||
1424 | * ciphertext, asynchronous callback function, etc. It acts as a handle to the | ||
1425 | * aead_request_* API calls in a similar way as AEAD handle to the | ||
1426 | * crypto_aead_* API calls. | ||
1427 | */ | ||
1428 | |||
1429 | /** | ||
1430 | * crypto_aead_reqsize() - obtain size of the request data structure | ||
1431 | * @tfm: cipher handle | ||
1432 | * | ||
1433 | * Return: number of bytes | ||
1434 | */ | ||
813 | static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) | 1435 | static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) |
814 | { | 1436 | { |
815 | return crypto_aead_crt(tfm)->reqsize; | 1437 | return crypto_aead_crt(tfm)->reqsize; |
816 | } | 1438 | } |
817 | 1439 | ||
1440 | /** | ||
1441 | * aead_request_set_tfm() - update cipher handle reference in request | ||
1442 | * @req: request handle to be modified | ||
1443 | * @tfm: cipher handle that shall be added to the request handle | ||
1444 | * | ||
1445 | * Allow the caller to replace the existing aead handle in the request | ||
1446 | * data structure with a different one. | ||
1447 | */ | ||
818 | static inline void aead_request_set_tfm(struct aead_request *req, | 1448 | static inline void aead_request_set_tfm(struct aead_request *req, |
819 | struct crypto_aead *tfm) | 1449 | struct crypto_aead *tfm) |
820 | { | 1450 | { |
821 | req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); | 1451 | req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); |
822 | } | 1452 | } |
823 | 1453 | ||
1454 | /** | ||
1455 | * aead_request_alloc() - allocate request data structure | ||
1456 | * @tfm: cipher handle to be registered with the request | ||
1457 | * @gfp: memory allocation flag that is handed to kmalloc by the API call. | ||
1458 | * | ||
1459 | * Allocate the request data structure that must be used with the AEAD | ||
1460 | * encrypt and decrypt API calls. During the allocation, the provided aead | ||
1461 | * handle is registered in the request data structure. | ||
1462 | * | ||
1463 | * Return: allocated request handle in case of success, or NULL in case of | ||
1464 | * an out-of-memory condition. | ||
1465 | */ | ||
824 | static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | 1466 | static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, |
825 | gfp_t gfp) | 1467 | gfp_t gfp) |
826 | { | 1468 | { |
@@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | |||
834 | return req; | 1476 | return req; |
835 | } | 1477 | } |
836 | 1478 | ||
1479 | /** | ||
1480 | * aead_request_free() - zeroize and free request data structure | ||
1481 | * @req: request data structure cipher handle to be freed | ||
1482 | */ | ||
837 | static inline void aead_request_free(struct aead_request *req) | 1483 | static inline void aead_request_free(struct aead_request *req) |
838 | { | 1484 | { |
839 | kzfree(req); | 1485 | kzfree(req); |
840 | } | 1486 | } |
841 | 1487 | ||
1488 | /** | ||
1489 | * aead_request_set_callback() - set asynchronous callback function | ||
1490 | * @req: request handle | ||
1491 | * @flags: specify zero or an ORing of the flags | ||
1492 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and | ||
1493 | * increase the wait queue beyond the initial maximum size; | ||
1494 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | ||
1495 | * @compl: callback function pointer to be registered with the request handle | ||
1496 | * @data: The data pointer refers to memory that is not used by the kernel | ||
1497 | * crypto API, but provided to the callback function for it to use. Here, | ||
1498 | * the caller can provide a reference to memory the callback function can | ||
1499 | * operate on. As the callback function is invoked asynchronously to the | ||
1500 | * related functionality, it may need to access data structures of the | ||
1501 | * related functionality which can be referenced using this pointer. The | ||
1502 | * callback function can access the memory via the "data" field in the | ||
1503 | * crypto_async_request data structure provided to the callback function. | ||
1504 | * | ||
1505 | * Setting the callback function that is triggered once the cipher operation | ||
1506 | * completes. | ||
1507 | * | ||
1508 | * The callback function is registered with the aead_request handle and | ||
1509 | * must comply with the following template: | ||
1510 | * | ||
1511 | * void callback_function(struct crypto_async_request *req, int error) | ||
1512 | */ | ||
842 | static inline void aead_request_set_callback(struct aead_request *req, | 1513 | static inline void aead_request_set_callback(struct aead_request *req, |
843 | u32 flags, | 1514 | u32 flags, |
844 | crypto_completion_t compl, | 1515 | crypto_completion_t compl, |
@@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req, | |||
849 | req->base.flags = flags; | 1520 | req->base.flags = flags; |
850 | } | 1521 | } |
851 | 1522 | ||
1523 | /** | ||
1524 | * aead_request_set_crypt - set data buffers | ||
1525 | * @req: request handle | ||
1526 | * @src: source scatter / gather list | ||
1527 | * @dst: destination scatter / gather list | ||
1528 | * @cryptlen: number of bytes to process from @src | ||
1529 | * @iv: IV for the cipher operation which must comply with the IV size defined | ||
1530 | * by crypto_aead_ivsize() | ||
1531 | * | ||
1532 | * Setting the source data and destination data scatter / gather lists. | ||
1533 | * | ||
1534 | * For encryption, the source is treated as the plaintext and the | ||
1535 | * destination is the ciphertext. For a decryption operation, the use is | ||
1536 | * reversed: the source is the ciphertext and the destination is the plaintext. | ||
1537 | * | ||
1538 | * IMPORTANT NOTE: AEAD requires an authentication tag (MAC). For decryption, | ||
1539 | * the caller must concatenate the ciphertext followed by the | ||
1540 | * authentication tag and provide the entire data stream to the | ||
1541 | * decryption operation (i.e. the data length used for the | ||
1542 | * initialization of the scatterlist and the data length for the | ||
1543 | * decryption operation is identical). For encryption, however, | ||
1544 | * the authentication tag is created while encrypting the data. | ||
1545 | * The destination buffer must hold sufficient space for the | ||
1546 | * ciphertext and the authentication tag while the encryption | ||
1547 | * invocation must only point to the plaintext data size. The | ||
1548 | * following code snippet illustrates the memory usage | ||
1549 | * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL); | ||
1550 | * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0)); | ||
1551 | * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); | ||
1552 | */ | ||
852 | static inline void aead_request_set_crypt(struct aead_request *req, | 1553 | static inline void aead_request_set_crypt(struct aead_request *req, |
853 | struct scatterlist *src, | 1554 | struct scatterlist *src, |
854 | struct scatterlist *dst, | 1555 | struct scatterlist *dst, |
@@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req, | |||
860 | req->iv = iv; | 1561 | req->iv = iv; |
861 | } | 1562 | } |
862 | 1563 | ||
1564 | /** | ||
1565 | * aead_request_set_assoc() - set the associated data scatter / gather list | ||
1566 | * @req: request handle | ||
1567 | * @assoc: associated data scatter / gather list | ||
1568 | * @assoclen: number of bytes to process from @assoc | ||
1569 | * | ||
1570 | * For encryption, the memory is filled with the associated data. For | ||
1571 | * decryption, the memory must point to the associated data. | ||
1572 | */ | ||
863 | static inline void aead_request_set_assoc(struct aead_request *req, | 1573 | static inline void aead_request_set_assoc(struct aead_request *req, |
864 | struct scatterlist *assoc, | 1574 | struct scatterlist *assoc, |
865 | unsigned int assoclen) | 1575 | unsigned int assoclen) |
@@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req, | |||
868 | req->assoclen = assoclen; | 1578 | req->assoclen = assoclen; |
869 | } | 1579 | } |
870 | 1580 | ||
1581 | /** | ||
1582 | * DOC: Synchronous Block Cipher API | ||
1583 | * | ||
1584 | * The synchronous block cipher API is used with the ciphers of type | ||
1585 | * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) | ||
1586 | * | ||
1587 | * Synchronous calls have a context in the tfm. But since a single tfm can be | ||
1588 | * used in multiple calls and in parallel, this info should not be changeable | ||
1589 | * (unless a lock is used). This applies, for example, to the symmetric key. | ||
1590 | * However, the IV is changeable, so there is an iv field in the blkcipher_tfm | ||
1591 | * structure for the synchronous blkcipher API. So, it's the only state info | ||
1592 | * that can be kept for synchronous calls without using a big lock across a tfm. | ||
1593 | * | ||
1594 | * The block cipher API allows the use of a complete cipher, i.e. a cipher | ||
1595 | * consisting of a template (a block chaining mode) and a single block cipher | ||
1596 | * primitive (e.g. AES). | ||
1597 | * | ||
1598 | * The plaintext data buffer and the ciphertext data buffer are pointed to | ||
1599 | * by using scatter/gather lists. The cipher operation is performed | ||
1600 | * on all segments of the provided scatter/gather lists. | ||
1601 | * | ||
1602 | * The kernel crypto API supports a cipher operation "in-place" which means that | ||
1603 | * the caller may provide the same scatter/gather list for the plaintext and | ||
1604 | * cipher text. After the completion of the cipher operation, the plaintext | ||
1605 | * data is replaced with the ciphertext data in case of an encryption and vice | ||
1606 | * versa for a decryption. The caller must ensure that the scatter/gather lists | ||
1607 | * for the output data point to sufficiently large buffers, i.e. multiples of | ||
1608 | * the block size of the cipher. | ||
1609 | */ | ||
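A hedged sketch of one synchronous, in-place encryption following the conventions above; the my_* names, the zero key and the zero IV are placeholders, and error handling is abbreviated:

/* Sketch: encrypt 'len' bytes of 'buf' in place with "cbc(aes)". */
static int my_blkcipher_encrypt(void *buf, unsigned int len)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        u8 key[16] = { 0 };
        u8 iv[16] = { 0 };
        int ret;

        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_blkcipher_setkey(tfm, key, sizeof(key));
        if (ret)
                goto out;

        desc.tfm = tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        desc.info = iv;                 /* IV for this operation only */

        /* len must be a multiple of crypto_blkcipher_blocksize(tfm). */
        sg_init_one(&sg, buf, len);
        ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
out:
        crypto_free_blkcipher(tfm);
        return ret;
}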
1610 | |||
871 | static inline struct crypto_blkcipher *__crypto_blkcipher_cast( | 1611 | static inline struct crypto_blkcipher *__crypto_blkcipher_cast( |
872 | struct crypto_tfm *tfm) | 1612 | struct crypto_tfm *tfm) |
873 | { | 1613 | { |
@@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( | |||
881 | return __crypto_blkcipher_cast(tfm); | 1621 | return __crypto_blkcipher_cast(tfm); |
882 | } | 1622 | } |
883 | 1623 | ||
1624 | /** | ||
1625 | * crypto_alloc_blkcipher() - allocate synchronous block cipher handle | ||
1626 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1627 | * blkcipher cipher | ||
1628 | * @type: specifies the type of the cipher | ||
1629 | * @mask: specifies the mask for the cipher | ||
1630 | * | ||
1631 | * Allocate a cipher handle for a block cipher. The returned struct | ||
1632 | * crypto_blkcipher is the cipher handle that is required for any subsequent | ||
1633 | * API invocation for that block cipher. | ||
1634 | * | ||
1635 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
1636 | * of an error, PTR_ERR() returns the error code. | ||
1637 | */ | ||
884 | static inline struct crypto_blkcipher *crypto_alloc_blkcipher( | 1638 | static inline struct crypto_blkcipher *crypto_alloc_blkcipher( |
885 | const char *alg_name, u32 type, u32 mask) | 1639 | const char *alg_name, u32 type, u32 mask) |
886 | { | 1640 | { |
@@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm( | |||
897 | return &tfm->base; | 1651 | return &tfm->base; |
898 | } | 1652 | } |
899 | 1653 | ||
1654 | /** | ||
1655 | * crypto_free_blkcipher() - zeroize and free the block cipher handle | ||
1656 | * @tfm: cipher handle to be freed | ||
1657 | */ | ||
900 | static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) | 1658 | static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) |
901 | { | 1659 | { |
902 | crypto_free_tfm(crypto_blkcipher_tfm(tfm)); | 1660 | crypto_free_tfm(crypto_blkcipher_tfm(tfm)); |
903 | } | 1661 | } |
904 | 1662 | ||
1663 | /** | ||
1664 | * crypto_has_blkcipher() - Search for the availability of a block cipher | ||
1665 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1666 | * block cipher | ||
1667 | * @type: specifies the type of the cipher | ||
1668 | * @mask: specifies the mask for the cipher | ||
1669 | * | ||
1670 | * Return: true when the block cipher is known to the kernel crypto API; false | ||
1671 | * otherwise | ||
1672 | */ | ||
905 | static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) | 1673 | static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) |
906 | { | 1674 | { |
907 | type &= ~CRYPTO_ALG_TYPE_MASK; | 1675 | type &= ~CRYPTO_ALG_TYPE_MASK; |
@@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) | |||
911 | return crypto_has_alg(alg_name, type, mask); | 1679 | return crypto_has_alg(alg_name, type, mask); |
912 | } | 1680 | } |
913 | 1681 | ||
1682 | /** | ||
1683 | * crypto_blkcipher_name() - return the name / cra_name from the cipher handle | ||
1684 | * @tfm: cipher handle | ||
1685 | * | ||
1686 | * Return: The character string holding the name of the cipher | ||
1687 | */ | ||
914 | static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) | 1688 | static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) |
915 | { | 1689 | { |
916 | return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); | 1690 | return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); |
@@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg( | |||
928 | return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; | 1702 | return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; |
929 | } | 1703 | } |
930 | 1704 | ||
1705 | /** | ||
1706 | * crypto_blkcipher_ivsize() - obtain IV size | ||
1707 | * @tfm: cipher handle | ||
1708 | * | ||
1709 | * The size of the IV for the block cipher referenced by the cipher handle is | ||
1710 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
1711 | * | ||
1712 | * Return: IV size in bytes | ||
1713 | */ | ||
931 | static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) | 1714 | static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) |
932 | { | 1715 | { |
933 | return crypto_blkcipher_alg(tfm)->ivsize; | 1716 | return crypto_blkcipher_alg(tfm)->ivsize; |
934 | } | 1717 | } |
935 | 1718 | ||
1719 | /** | ||
1720 | * crypto_blkcipher_blocksize() - obtain block size of cipher | ||
1721 | * @tfm: cipher handle | ||
1722 | * | ||
1723 | * The block size for the block cipher referenced with the cipher handle is | ||
1724 | * returned. The caller may use that information to allocate appropriate | ||
1725 | * memory for the data returned by the encryption or decryption operation. | ||
1726 | * | ||
1727 | * Return: block size of cipher | ||
1728 | */ | ||
936 | static inline unsigned int crypto_blkcipher_blocksize( | 1729 | static inline unsigned int crypto_blkcipher_blocksize( |
937 | struct crypto_blkcipher *tfm) | 1730 | struct crypto_blkcipher *tfm) |
938 | { | 1731 | { |
@@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, | |||
962 | crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); | 1755 | crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); |
963 | } | 1756 | } |
964 | 1757 | ||
1758 | /** | ||
1759 | * crypto_blkcipher_setkey() - set key for cipher | ||
1760 | * @tfm: cipher handle | ||
1761 | * @key: buffer holding the key | ||
1762 | * @keylen: length of the key in bytes | ||
1763 | * | ||
1764 | * The caller provided key is set for the block cipher referenced by the cipher | ||
1765 | * handle. | ||
1766 | * | ||
1767 | * Note, the key length determines the cipher type. Many block ciphers come in | ||
1768 | * different variants depending on the key size, such as AES-128 vs. AES-192 | ||
1769 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
1770 | * is performed. | ||
1771 | * | ||
1772 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
1773 | */ | ||
965 | static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, | 1774 | static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, |
966 | const u8 *key, unsigned int keylen) | 1775 | const u8 *key, unsigned int keylen) |
967 | { | 1776 | { |
@@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, | |||
969 | key, keylen); | 1778 | key, keylen); |
970 | } | 1779 | } |
971 | 1780 | ||
1781 | /** | ||
1782 | * crypto_blkcipher_encrypt() - encrypt plaintext | ||
1783 | * @desc: reference to the block cipher handle with meta data | ||
1784 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
1785 | * ciphertext | ||
1786 | * @src: scatter/gather list that holds the plaintext | ||
1787 | * @nbytes: number of bytes of the plaintext to encrypt. | ||
1788 | * | ||
1789 | * Encrypt plaintext data using the IV set by the caller with a preceding | ||
1790 | * call of crypto_blkcipher_set_iv. | ||
1791 | * | ||
1792 | * The blkcipher_desc data structure must be filled by the caller and can | ||
1793 | * reside on the stack. The caller must fill desc as follows: desc.tfm is filled | ||
1794 | * with the block cipher handle; desc.flags is filled with either | ||
1795 | * CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
1796 | * | ||
1797 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1798 | */ | ||
972 | static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, | 1799 | static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, |
973 | struct scatterlist *dst, | 1800 | struct scatterlist *dst, |
974 | struct scatterlist *src, | 1801 | struct scatterlist *src, |
@@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, | |||
978 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); | 1805 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); |
979 | } | 1806 | } |
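A minimal sketch of the on-stack blkcipher_desc usage described above, assuming the IV was installed earlier with crypto_blkcipher_set_iv(); scatterlist setup and the sg_in/sg_out names are left to the caller.

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Illustrative call site: desc lives on the stack, the IV was set earlier
 * with crypto_blkcipher_set_iv(); sg_in/sg_out describe src and dst data.
 */
static int example_blkcipher_encrypt(struct crypto_blkcipher *tfm,
				     struct scatterlist *sg_out,
				     struct scatterlist *sg_in,
				     unsigned int nbytes)
{
	struct blkcipher_desc desc = {
		.tfm	= tfm,
		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	return crypto_blkcipher_encrypt(&desc, sg_out, sg_in, nbytes);
}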
980 | 1807 | ||
1808 | /** | ||
1809 | * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV | ||
1810 | * @desc: reference to the block cipher handle with meta data | ||
1811 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
1812 | * ciphertext | ||
1813 | * @src: scatter/gather list that holds the plaintext | ||
1814 | * @nbytes: number of bytes of the plaintext to encrypt. | ||
1815 | * | ||
1816 | * Encrypt plaintext data with the use of an IV that is solely used for this | ||
1817 | * cipher operation. Any previously set IV is not used. | ||
1818 | * | ||
1819 | * The blkcipher_desc data structure must be filled by the caller and can | ||
1820 | * reside on the stack. The caller must fill desc as follows: desc.tfm is filled | ||
1821 | * with the block cipher handle; desc.info is filled with the IV to be used for | ||
1822 | * the current operation; desc.flags is filled with either | ||
1823 | * CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
1824 | * | ||
1825 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1826 | */ | ||
981 | static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, | 1827 | static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, |
982 | struct scatterlist *dst, | 1828 | struct scatterlist *dst, |
983 | struct scatterlist *src, | 1829 | struct scatterlist *src, |
@@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, | |||
986 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); | 1832 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); |
987 | } | 1833 | } |
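The same pattern with a per-call IV carried in desc.info; a sketch assuming iv points to crypto_blkcipher_ivsize() bytes.

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Illustrative per-call IV variant: iv must point to
 * crypto_blkcipher_ivsize(tfm) bytes and is used only for this request.
 */
static int example_blkcipher_encrypt_iv(struct crypto_blkcipher *tfm, u8 *iv,
					struct scatterlist *sg_out,
					struct scatterlist *sg_in,
					unsigned int nbytes)
{
	struct blkcipher_desc desc = {
		.tfm	= tfm,
		.info	= iv,
		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	return crypto_blkcipher_encrypt_iv(&desc, sg_out, sg_in, nbytes);
}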
988 | 1834 | ||
1835 | /** | ||
1836 | * crypto_blkcipher_decrypt() - decrypt ciphertext | ||
1837 | * @desc: reference to the block cipher handle with meta data | ||
1838 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
1839 | * plaintext | ||
1840 | * @src: scatter/gather list that holds the ciphertext | ||
1841 | * @nbytes: number of bytes of the ciphertext to decrypt. | ||
1842 | * | ||
1843 | * Decrypt ciphertext data using the IV set by the caller with a preceding | ||
1844 | * call of crypto_blkcipher_set_iv. | ||
1845 | * | ||
1846 | * The blkcipher_desc data structure must be filled by the caller as documented | ||
1847 | * for the crypto_blkcipher_encrypt call above. | ||
1848 | * | ||
1849 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1850 | * | ||
1851 | */ | ||
989 | static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, | 1852 | static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, |
990 | struct scatterlist *dst, | 1853 | struct scatterlist *dst, |
991 | struct scatterlist *src, | 1854 | struct scatterlist *src, |
@@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, | |||
995 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); | 1858 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); |
996 | } | 1859 | } |
997 | 1860 | ||
1861 | /** | ||
1862 | * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV | ||
1863 | * @desc: reference to the block cipher handle with meta data | ||
1864 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
1865 | * plaintext | ||
1866 | * @src: scatter/gather list that holds the ciphertext | ||
1867 | * @nbytes: number of bytes of the ciphertext to decrypt. | ||
1868 | * | ||
1869 | * Decrypt ciphertext data with the use of an IV that is solely used for this | ||
1870 | * cipher operation. Any previously set IV is not used. | ||
1871 | * | ||
1872 | * The blkcipher_desc data structure must be filled by the caller as documented | ||
1873 | * for the crypto_blkcipher_encrypt_iv call above. | ||
1874 | * | ||
1875 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1876 | */ | ||
998 | static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, | 1877 | static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, |
999 | struct scatterlist *dst, | 1878 | struct scatterlist *dst, |
1000 | struct scatterlist *src, | 1879 | struct scatterlist *src, |
@@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, | |||
1003 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); | 1882 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); |
1004 | } | 1883 | } |
1005 | 1884 | ||
1885 | /** | ||
1886 | * crypto_blkcipher_set_iv() - set IV for cipher | ||
1887 | * @tfm: cipher handle | ||
1888 | * @src: buffer holding the IV | ||
1889 | * @len: length of the IV in bytes | ||
1890 | * | ||
1891 | * The caller provided IV is set for the block cipher referenced by the cipher | ||
1892 | * handle. | ||
1893 | */ | ||
1006 | static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, | 1894 | static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, |
1007 | const u8 *src, unsigned int len) | 1895 | const u8 *src, unsigned int len) |
1008 | { | 1896 | { |
1009 | memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); | 1897 | memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); |
1010 | } | 1898 | } |
1011 | 1899 | ||
1900 | /** | ||
1901 | * crypto_blkcipher_get_iv() - obtain IV from cipher | ||
1902 | * @tfm: cipher handle | ||
1903 | * @dst: buffer filled with the IV | ||
1904 | * @len: length of the buffer dst | ||
1905 | * | ||
1906 | * The caller can obtain the IV set for the block cipher referenced by the | ||
1907 | * cipher handle and store it into the user-provided buffer. If the buffer | ||
1908 | * has insufficient space, the IV is truncated to fit the buffer. | ||
1909 | */ | ||
1012 | static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, | 1910 | static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, |
1013 | u8 *dst, unsigned int len) | 1911 | u8 *dst, unsigned int len) |
1014 | { | 1912 | { |
1015 | memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); | 1913 | memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); |
1016 | } | 1914 | } |
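One reason to pair the two calls is that chaining modes such as CBC advance the implicit IV as they run; a sketch that snapshots and later restores that state (helper names are illustrative, saved_iv must hold ivsize bytes):

#include <linux/crypto.h>

/* Illustrative snapshot/restore of the running IV around other operations. */
static void example_save_iv(struct crypto_blkcipher *tfm, u8 *saved_iv)
{
	crypto_blkcipher_get_iv(tfm, saved_iv, crypto_blkcipher_ivsize(tfm));
}

static void example_restore_iv(struct crypto_blkcipher *tfm, const u8 *saved_iv)
{
	crypto_blkcipher_set_iv(tfm, saved_iv, crypto_blkcipher_ivsize(tfm));
}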
1017 | 1915 | ||
1916 | /** | ||
1917 | * DOC: Single Block Cipher API | ||
1918 | * | ||
1919 | * The single block cipher API is used with the ciphers of type | ||
1920 | * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). | ||
1921 | * | ||
1922 | * Using the single block cipher API calls, operations with the basic cipher | ||
1923 | * primitive can be implemented. These cipher primitives exclude any block | ||
1924 | * chaining operations including IV handling. | ||
1925 | * | ||
1926 | * The purpose of this single block cipher API is to support the implementation | ||
1927 | * of templates or other concepts that only need to perform the cipher operation | ||
1928 | * on one block at a time. Templates invoke the underlying cipher primitive | ||
1929 | * block-wise and process either the input or the output data of these cipher | ||
1930 | * operations. | ||
1931 | */ | ||
1932 | |||
1018 | static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) | 1933 | static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) |
1019 | { | 1934 | { |
1020 | return (struct crypto_cipher *)tfm; | 1935 | return (struct crypto_cipher *)tfm; |
@@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) | |||
1026 | return __crypto_cipher_cast(tfm); | 1941 | return __crypto_cipher_cast(tfm); |
1027 | } | 1942 | } |
1028 | 1943 | ||
1944 | /** | ||
1945 | * crypto_alloc_cipher() - allocate single block cipher handle | ||
1946 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1947 | * single block cipher | ||
1948 | * @type: specifies the type of the cipher | ||
1949 | * @mask: specifies the mask for the cipher | ||
1950 | * | ||
1951 | * Allocate a cipher handle for a single block cipher. The returned struct | ||
1952 | * crypto_cipher is the cipher handle that is required for any subsequent API | ||
1953 | * invocation for that single block cipher. | ||
1954 | * | ||
1955 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
1956 | * of an error, PTR_ERR() returns the error code. | ||
1957 | */ | ||
1029 | static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, | 1958 | static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, |
1030 | u32 type, u32 mask) | 1959 | u32 type, u32 mask) |
1031 | { | 1960 | { |
@@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) | |||
1041 | return &tfm->base; | 1970 | return &tfm->base; |
1042 | } | 1971 | } |
1043 | 1972 | ||
1973 | /** | ||
1974 | * crypto_free_cipher() - zeroize and free the single block cipher handle | ||
1975 | * @tfm: cipher handle to be freed | ||
1976 | */ | ||
1044 | static inline void crypto_free_cipher(struct crypto_cipher *tfm) | 1977 | static inline void crypto_free_cipher(struct crypto_cipher *tfm) |
1045 | { | 1978 | { |
1046 | crypto_free_tfm(crypto_cipher_tfm(tfm)); | 1979 | crypto_free_tfm(crypto_cipher_tfm(tfm)); |
1047 | } | 1980 | } |
1048 | 1981 | ||
1982 | /** | ||
1983 | * crypto_has_cipher() - Search for the availability of a single block cipher | ||
1984 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1985 | * single block cipher | ||
1986 | * @type: specifies the type of the cipher | ||
1987 | * @mask: specifies the mask for the cipher | ||
1988 | * | ||
1989 | * Return: true when the single block cipher is known to the kernel crypto API; | ||
1990 | * false otherwise | ||
1991 | */ | ||
1049 | static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) | 1992 | static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) |
1050 | { | 1993 | { |
1051 | type &= ~CRYPTO_ALG_TYPE_MASK; | 1994 | type &= ~CRYPTO_ALG_TYPE_MASK; |
@@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) | |||
1060 | return &crypto_cipher_tfm(tfm)->crt_cipher; | 2003 | return &crypto_cipher_tfm(tfm)->crt_cipher; |
1061 | } | 2004 | } |
1062 | 2005 | ||
2006 | /** | ||
2007 | * crypto_cipher_blocksize() - obtain block size for cipher | ||
2008 | * @tfm: cipher handle | ||
2009 | * | ||
2010 | * The block size for the single block cipher referenced with the cipher handle | ||
2011 | * tfm is returned. The caller may use that information to allocate appropriate | ||
2012 | * memory for the data returned by the encryption or decryption operation. | ||
2013 | * | ||
2014 | * Return: block size of cipher | ||
2015 | */ | ||
1063 | static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) | 2016 | static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) |
1064 | { | 2017 | { |
1065 | return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); | 2018 | return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); |
@@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, | |||
1087 | crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); | 2040 | crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); |
1088 | } | 2041 | } |
1089 | 2042 | ||
2043 | /** | ||
2044 | * crypto_cipher_setkey() - set key for cipher | ||
2045 | * @tfm: cipher handle | ||
2046 | * @key: buffer holding the key | ||
2047 | * @keylen: length of the key in bytes | ||
2048 | * | ||
2049 | * The caller provided key is set for the single block cipher referenced by the | ||
2050 | * cipher handle. | ||
2051 | * | ||
2052 | * Note: the key length determines the cipher variant. Many block ciphers | ||
2053 | * support several key sizes, such as AES-128, AES-192 and AES-256. When a | ||
2054 | * 16-byte key is provided for an AES cipher handle, AES-128 is used for the | ||
2055 | * operation. | ||
2056 | * | ||
2057 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
2058 | */ | ||
1090 | static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, | 2059 | static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, |
1091 | const u8 *key, unsigned int keylen) | 2060 | const u8 *key, unsigned int keylen) |
1092 | { | 2061 | { |
@@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, | |||
1094 | key, keylen); | 2063 | key, keylen); |
1095 | } | 2064 | } |
1096 | 2065 | ||
2066 | /** | ||
2067 | * crypto_cipher_encrypt_one() - encrypt one block of plaintext | ||
2068 | * @tfm: cipher handle | ||
2069 | * @dst: points to the buffer that will be filled with the ciphertext | ||
2070 | * @src: buffer holding the plaintext to be encrypted | ||
2071 | * | ||
2072 | * Invoke the encryption operation of one block. The caller must ensure that | ||
2073 | * the plaintext and ciphertext buffers are at least one block in size. | ||
2074 | */ | ||
1097 | static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, | 2075 | static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, |
1098 | u8 *dst, const u8 *src) | 2076 | u8 *dst, const u8 *src) |
1099 | { | 2077 | { |
@@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, | |||
1101 | dst, src); | 2079 | dst, src); |
1102 | } | 2080 | } |
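A minimal lifecycle sketch for the single block cipher API: allocate the bare "aes" primitive, key it and encrypt exactly one block; the helper name is illustrative and buffer sizing per crypto_cipher_blocksize() is assumed to be handled by the caller.

#include <linux/crypto.h>
#include <linux/err.h>

/* Illustrative single-block use: in and out must each be at least
 * crypto_cipher_blocksize(tfm) bytes (16 for AES).
 */
static int example_aes_encrypt_block(const u8 *key, unsigned int keylen,
				     const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, keylen);
	if (!ret)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return ret;
}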
1103 | 2081 | ||
2082 | /** | ||
2083 | * crypto_cipher_decrypt_one() - decrypt one block of ciphertext | ||
2084 | * @tfm: cipher handle | ||
2085 | * @dst: points to the buffer that will be filled with the plaintext | ||
2086 | * @src: buffer holding the ciphertext to be decrypted | ||
2087 | * | ||
2088 | * Invoke the decryption operation of one block. The caller must ensure that | ||
2089 | * the plaintext and ciphertext buffers are at least one block in size. | ||
2090 | */ | ||
1104 | static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, | 2091 | static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, |
1105 | u8 *dst, const u8 *src) | 2092 | u8 *dst, const u8 *src) |
1106 | { | 2093 | { |
@@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, | |||
1108 | dst, src); | 2095 | dst, src); |
1109 | } | 2096 | } |
1110 | 2097 | ||
2098 | /** | ||
2099 | * DOC: Synchronous Message Digest API | ||
2100 | * | ||
2101 | * The synchronous message digest API is used with the ciphers of type | ||
2102 | * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto). | ||
2103 | */ | ||
2104 | |||
1111 | static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) | 2105 | static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) |
1112 | { | 2106 | { |
1113 | return (struct crypto_hash *)tfm; | 2107 | return (struct crypto_hash *)tfm; |
@@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) | |||
1120 | return __crypto_hash_cast(tfm); | 2114 | return __crypto_hash_cast(tfm); |
1121 | } | 2115 | } |
1122 | 2116 | ||
2117 | /** | ||
2118 | * crypto_alloc_hash() - allocate synchronous message digest handle | ||
2119 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
2120 | * message digest cipher | ||
2121 | * @type: specifies the type of the cipher | ||
2122 | * @mask: specifies the mask for the cipher | ||
2123 | * | ||
2124 | * Allocate a cipher handle for a message digest. The returned struct | ||
2125 | * crypto_hash is the cipher handle that is required for any subsequent | ||
2126 | * API invocation for that message digest. | ||
2127 | * | ||
2128 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
2129 | * of an error, PTR_ERR() returns the error code. | ||
2130 | */ | ||
1123 | static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, | 2131 | static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, |
1124 | u32 type, u32 mask) | 2132 | u32 type, u32 mask) |
1125 | { | 2133 | { |
@@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) | |||
1136 | return &tfm->base; | 2144 | return &tfm->base; |
1137 | } | 2145 | } |
1138 | 2146 | ||
2147 | /** | ||
2148 | * crypto_free_hash() - zeroize and free message digest handle | ||
2149 | * @tfm: cipher handle to be freed | ||
2150 | */ | ||
1139 | static inline void crypto_free_hash(struct crypto_hash *tfm) | 2151 | static inline void crypto_free_hash(struct crypto_hash *tfm) |
1140 | { | 2152 | { |
1141 | crypto_free_tfm(crypto_hash_tfm(tfm)); | 2153 | crypto_free_tfm(crypto_hash_tfm(tfm)); |
1142 | } | 2154 | } |
1143 | 2155 | ||
2156 | /** | ||
2157 | * crypto_has_hash() - Search for the availability of a message digest | ||
2158 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
2159 | * message digest cipher | ||
2160 | * @type: specifies the type of the cipher | ||
2161 | * @mask: specifies the mask for the cipher | ||
2162 | * | ||
2163 | * Return: true when the message digest cipher is known to the kernel crypto | ||
2164 | * API; false otherwise | ||
2165 | */ | ||
1144 | static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) | 2166 | static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) |
1145 | { | 2167 | { |
1146 | type &= ~CRYPTO_ALG_TYPE_MASK; | 2168 | type &= ~CRYPTO_ALG_TYPE_MASK; |
@@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) | |||
1156 | return &crypto_hash_tfm(tfm)->crt_hash; | 2178 | return &crypto_hash_tfm(tfm)->crt_hash; |
1157 | } | 2179 | } |
1158 | 2180 | ||
2181 | /** | ||
2182 | * crypto_hash_blocksize() - obtain block size for message digest | ||
2183 | * @tfm: cipher handle | ||
2184 | * | ||
2185 | * The block size for the message digest cipher referenced with the cipher | ||
2186 | * handle is returned. | ||
2187 | * | ||
2188 | * Return: block size of cipher | ||
2189 | */ | ||
1159 | static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) | 2190 | static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) |
1160 | { | 2191 | { |
1161 | return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); | 2192 | return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); |
@@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) | |||
1166 | return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); | 2197 | return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); |
1167 | } | 2198 | } |
1168 | 2199 | ||
2200 | /** | ||
2201 | * crypto_hash_digestsize() - obtain message digest size | ||
2202 | * @tfm: cipher handle | ||
2203 | * | ||
2204 | * The size of the message digest created by the message digest cipher | ||
2205 | * referenced with the cipher handle is returned. | ||
2206 | * | ||
2207 | * Return: message digest size | ||
2208 | */ | ||
1169 | static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) | 2209 | static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) |
1170 | { | 2210 | { |
1171 | return crypto_hash_crt(tfm)->digestsize; | 2211 | return crypto_hash_crt(tfm)->digestsize; |
@@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) | |||
1186 | crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); | 2226 | crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); |
1187 | } | 2227 | } |
1188 | 2228 | ||
2229 | /** | ||
2230 | * crypto_hash_init() - (re)initialize message digest handle | ||
2231 | * @desc: cipher request handle that is to be filled by the caller -- | ||
2232 | * desc.tfm is filled with the hash cipher handle; | ||
2233 | * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
2234 | * | ||
2235 | * The call (re-)initializes the message digest referenced by the hash cipher | ||
2236 | * request handle. Any potentially existing state created by previous | ||
2237 | * operations is discarded. | ||
2238 | * | ||
2239 | * Return: 0 if the message digest initialization was successful; < 0 if an | ||
2240 | * error occurred | ||
2241 | */ | ||
1189 | static inline int crypto_hash_init(struct hash_desc *desc) | 2242 | static inline int crypto_hash_init(struct hash_desc *desc) |
1190 | { | 2243 | { |
1191 | return crypto_hash_crt(desc->tfm)->init(desc); | 2244 | return crypto_hash_crt(desc->tfm)->init(desc); |
1192 | } | 2245 | } |
1193 | 2246 | ||
2247 | /** | ||
2248 | * crypto_hash_update() - add data to message digest for processing | ||
2249 | * @desc: cipher request handle | ||
2250 | * @sg: scatter / gather list pointing to the data to be added to the message | ||
2251 | * digest | ||
2252 | * @nbytes: number of bytes to be processed from @sg | ||
2253 | * | ||
2254 | * Updates the message digest state of the cipher handle pointed to by the | ||
2255 | * hash cipher request handle with the input data pointed to by the | ||
2256 | * scatter/gather list. | ||
2257 | * | ||
2258 | * Return: 0 if the message digest update was successful; < 0 if an error | ||
2259 | * occurred | ||
2260 | */ | ||
1194 | static inline int crypto_hash_update(struct hash_desc *desc, | 2261 | static inline int crypto_hash_update(struct hash_desc *desc, |
1195 | struct scatterlist *sg, | 2262 | struct scatterlist *sg, |
1196 | unsigned int nbytes) | 2263 | unsigned int nbytes) |
@@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc, | |||
1198 | return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); | 2265 | return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); |
1199 | } | 2266 | } |
1200 | 2267 | ||
2268 | /** | ||
2269 | * crypto_hash_final() - calculate message digest | ||
2270 | * @desc: cipher request handle | ||
2271 | * @out: message digest output buffer -- The caller must ensure that the out | ||
2272 | * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize | ||
2273 | * function). | ||
2274 | * | ||
2275 | * Finalize the message digest operation and create the message digest | ||
2276 | * based on all data added to the cipher handle. The message digest is placed | ||
2277 | * into the output buffer. | ||
2278 | * | ||
2279 | * Return: 0 if the message digest creation was successful; < 0 if an error | ||
2280 | * occurred | ||
2281 | */ | ||
1201 | static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) | 2282 | static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) |
1202 | { | 2283 | { |
1203 | return crypto_hash_crt(desc->tfm)->final(desc, out); | 2284 | return crypto_hash_crt(desc->tfm)->final(desc, out); |
1204 | } | 2285 | } |
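A minimal sketch of the init/update/final sequence over a linear buffer, assuming a synchronous sha256 handle and an out buffer of crypto_hash_digestsize() bytes; the helper name is illustrative.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative init/update/final sequence over one linear buffer; out must
 * hold crypto_hash_digestsize(tfm) bytes (32 for sha256).
 */
static int example_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_one(&sg, data, len);
	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_hash_init(&desc);
	if (!ret)
		ret = crypto_hash_update(&desc, &sg, len);
	if (!ret)
		ret = crypto_hash_final(&desc, out);

	crypto_free_hash(tfm);
	return ret;
}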
1205 | 2286 | ||
2287 | /** | ||
2288 | * crypto_hash_digest() - calculate message digest for a buffer | ||
2289 | * @desc: see crypto_hash_final() | ||
2290 | * @sg: see crypto_hash_update() | ||
2291 | * @nbytes: see crypto_hash_update() | ||
2292 | * @out: see crypto_hash_final() | ||
2293 | * | ||
2294 | * This function is a shorthand for the sequence of crypto_hash_init, | ||
2295 | * crypto_hash_update and crypto_hash_final. The parameters have the same | ||
2296 | * meaning as discussed for those three functions. | ||
2297 | * | ||
2298 | * Return: 0 if the message digest creation was successful; < 0 if an error | ||
2299 | * occurred | ||
2300 | */ | ||
1206 | static inline int crypto_hash_digest(struct hash_desc *desc, | 2301 | static inline int crypto_hash_digest(struct hash_desc *desc, |
1207 | struct scatterlist *sg, | 2302 | struct scatterlist *sg, |
1208 | unsigned int nbytes, u8 *out) | 2303 | unsigned int nbytes, u8 *out) |
@@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc, | |||
1210 | return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); | 2305 | return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); |
1211 | } | 2306 | } |
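The same work as a one-shot call, here with a keyed hash so that crypto_hash_setkey() (documented just below) is exercised as well; the choice of hmac(sha256) and the helper name are illustrative.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative one-shot digest over a caller-provided scatterlist using a
 * keyed hash; out must hold crypto_hash_digestsize(tfm) bytes.
 */
static int example_hmac_sha256(const u8 *key, unsigned int keylen,
			       struct scatterlist *sg, unsigned int nbytes,
			       u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	int ret;

	tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_hash_setkey(tfm, key, keylen);
	if (!ret) {
		desc.tfm = tfm;
		desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		ret = crypto_hash_digest(&desc, sg, nbytes, out);
	}

	crypto_free_hash(tfm);
	return ret;
}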
1212 | 2307 | ||
2308 | /** | ||
2309 | * crypto_hash_setkey() - set key for message digest | ||
2310 | * @hash: cipher handle | ||
2311 | * @key: buffer holding the key | ||
2312 | * @keylen: length of the key in bytes | ||
2313 | * | ||
2314 | * The caller provided key is set for the message digest cipher. The cipher | ||
2315 | * handle must point to a keyed hash in order for this function to succeed. | ||
2316 | * | ||
2317 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
2318 | */ | ||
1213 | static inline int crypto_hash_setkey(struct crypto_hash *hash, | 2319 | static inline int crypto_hash_setkey(struct crypto_hash *hash, |
1214 | const u8 *key, unsigned int keylen) | 2320 | const u8 *key, unsigned int keylen) |
1215 | { | 2321 | { |
diff --git a/include/net/sock.h b/include/net/sock.h index c3e83c9a8ab8..2210fec65669 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1593,6 +1593,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, | |||
1593 | int *errcode, int max_page_order); | 1593 | int *errcode, int max_page_order); |
1594 | void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); | 1594 | void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); |
1595 | void sock_kfree_s(struct sock *sk, void *mem, int size); | 1595 | void sock_kfree_s(struct sock *sk, void *mem, int size); |
1596 | void sock_kzfree_s(struct sock *sk, void *mem, int size); | ||
1596 | void sk_send_sigurg(struct sock *sk); | 1597 | void sk_send_sigurg(struct sock *sk); |
1597 | 1598 | ||
1598 | /* | 1599 | /* |
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h index 0f9acce5b1ff..f2acd2fde1f3 100644 --- a/include/uapi/linux/if_alg.h +++ b/include/uapi/linux/if_alg.h | |||
@@ -32,6 +32,8 @@ struct af_alg_iv { | |||
32 | #define ALG_SET_KEY 1 | 32 | #define ALG_SET_KEY 1 |
33 | #define ALG_SET_IV 2 | 33 | #define ALG_SET_IV 2 |
34 | #define ALG_SET_OP 3 | 34 | #define ALG_SET_OP 3 |
35 | #define ALG_SET_AEAD_ASSOCLEN 4 | ||
36 | #define ALG_SET_AEAD_AUTHSIZE 5 | ||
35 | 37 | ||
36 | /* Operations */ | 38 | /* Operations */ |
37 | #define ALG_OP_DECRYPT 0 | 39 | #define ALG_OP_DECRYPT 0 |
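The two new control values are consumed by the AF_ALG AEAD user-space interface added in this pull. A hedged user-space sketch, assuming the algif_aead semantics: the authentication tag length travels in the optlen of a setsockopt() on the tfm socket, while the associated-data length is sent per request as an ALG_SET_AEAD_ASSOCLEN cmsg next to ALG_SET_OP and ALG_SET_IV; SOL_ALG may need a local definition on older libcs, and the helper name is illustrative.

#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

/* Illustrative user-space call: request the authentication tag size on the
 * tfm socket; the size is carried in optlen, optval stays NULL.
 */
static int example_set_aead_taglen(int tfmfd, unsigned int taglen)
{
	return setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, taglen);
}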