author	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-23 12:54:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-23 12:54:19 -0500
commit	5bcbe22ca47da04cda3a858cef67f55b550c1d13 (patch)
tree	49bd61e32eb2d652085a49182436322a3e0e9840 /include/crypto
parent	1db934a5b77a9e37c4742c704fde6af233187a98 (diff)
parent	12cb3a1c4184f891d965d1f39f8cfcc9ef617647 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Try to catch hash output overrun in testmgr
   - Introduce walksize attribute for batched walking
   - Make crypto_xor() and crypto_inc() alignment agnostic

  Algorithms:
   - Add time-invariant AES algorithm
   - Add standalone CBCMAC algorithm

  Drivers:
   - Add NEON accelerated chacha20 on ARM/ARM64
   - Expose AES-CTR as synchronous skcipher on ARM64
   - Add scalar AES implementation on ARM64
   - Improve scalar AES implementation on ARM
   - Improve NEON AES implementation on ARM/ARM64
   - Merge CRC32 and PMULL instruction based drivers on ARM64
   - Add NEON accelerated CBCMAC/CMAC/XCBC AES on ARM64
   - Add IPsec AUTHENC implementation in atmel
   - Add support for Octeon-tx CPT Engine
   - Add Broadcom SPU driver
   - Add MediaTek driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (142 commits)
  crypto: xts - Add ECB dependency
  crypto: cavium - switch to pci_alloc_irq_vectors
  crypto: cavium - switch to pci_alloc_irq_vectors
  crypto: cavium - remove dead MSI-X related define
  crypto: brcm - Avoid double free in ahash_finup()
  crypto: cavium - fix Kconfig dependencies
  crypto: cavium - cpt_bind_vq_to_grp could return an error code
  crypto: doc - fix typo
  hwrng: omap - update Kconfig help description
  crypto: ccm - drop unnecessary minimum 32-bit alignment
  crypto: ccm - honour alignmask of subordinate MAC cipher
  crypto: caam - fix state buffer DMA (un)mapping
  crypto: caam - abstract ahash request double buffering
  crypto: caam - fix error path for ctx_dma mapping failure
  crypto: caam - fix DMA API leaks for multiple setkey() calls
  crypto: caam - don't dma_map key for hash algorithms
  crypto: caam - use dma_map_sg() return code
  crypto: caam - replace sg_count() with sg_nents_for_len()
  crypto: caam - check sg_count() return value
  crypto: caam - fix HW S/G in ablkcipher_giv_edesc_alloc()
  ...
Diffstat (limited to 'include/crypto')
-rw-r--r--	include/crypto/algapi.h	20
-rw-r--r--	include/crypto/chacha20.h	6
-rw-r--r--	include/crypto/hash.h	18
-rw-r--r--	include/crypto/internal/skcipher.h	2
-rw-r--r--	include/crypto/skcipher.h	34
5 files changed, 68 insertions, 12 deletions
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 404e9558e879..ebe4ded0c55d 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -191,9 +191,25 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
 	return queue->qlen;
 }
 
-/* These functions require the input/output to be aligned as u32. */
 void crypto_inc(u8 *a, unsigned int size);
-void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
+void __crypto_xor(u8 *dst, const u8 *src, unsigned int size);
+
+static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+{
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+	    __builtin_constant_p(size) &&
+	    (size % sizeof(unsigned long)) == 0) {
+		unsigned long *d = (unsigned long *)dst;
+		unsigned long *s = (unsigned long *)src;
+
+		while (size > 0) {
+			*d++ ^= *s++;
+			size -= sizeof(unsigned long);
+		}
+	} else {
+		__crypto_xor(dst, src, size);
+	}
+}
 
 int blkcipher_walk_done(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk, int err);
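The new crypto_xor() above drops the old u32 alignment requirement: when the length is a compile-time constant multiple of the word size and the architecture handles unaligned loads efficiently, the XOR happens word-at-a-time inline; everything else falls through to __crypto_xor(). A minimal caller sketch (the function name is illustrative, not from this patch):

	#include <crypto/algapi.h>

	/* XOR one 16-byte block into another, CBC/CTR style. The length
	 * is a compile-time constant multiple of sizeof(unsigned long),
	 * so on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS architectures the
	 * inline expands to the word-at-a-time loop; otherwise it calls
	 * __crypto_xor().
	 */
	static void example_xor_block(u8 *dst, const u8 *keystream)
	{
		crypto_xor(dst, keystream, 16);
	}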
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index 20d20f681a72..445fc45f4b5b 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -5,6 +5,7 @@
 #ifndef _CRYPTO_CHACHA20_H
 #define _CRYPTO_CHACHA20_H
 
+#include <crypto/skcipher.h>
 #include <linux/types.h>
 #include <linux/crypto.h>
 
@@ -18,9 +19,8 @@ struct chacha20_ctx {
 
 void chacha20_block(u32 *state, void *stream);
 void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
-int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
-			   unsigned int keysize);
-int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			  struct scatterlist *src, unsigned int nbytes);
+int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			   unsigned int keysize);
+int crypto_chacha20_crypt(struct skcipher_request *req);
 
 #endif
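These prototypes move chacha20 from the old blkcipher interface onto the skcipher API. As a hedged sketch of what a caller of the "chacha20" algorithm looks like under that API (helper name and error handling are illustrative, not part of this patch):

	#include <crypto/chacha20.h>
	#include <crypto/skcipher.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Encrypt a contiguous buffer in place. A synchronous
	 * implementation is requested, so crypto_skcipher_encrypt()
	 * completes before returning.
	 */
	static int example_chacha20_encrypt(u8 *buf, unsigned int len,
					    const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		int err;

		/* mask out async implementations to keep this simple */
		tfm = crypto_alloc_skcipher("chacha20", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, CHACHA20_KEY_SIZE);
		if (err)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg, buf, len);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		err = crypto_skcipher_encrypt(req);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}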
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 216a2b876147..b5727bcd2336 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -329,6 +329,16 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
 	return crypto_hash_alg_common(tfm)->digestsize;
 }
 
+/**
+ * crypto_ahash_statesize() - obtain size of the ahash state
+ * @tfm: cipher handle
+ *
+ * Return the size of the ahash state. With the crypto_ahash_export()
+ * function, the caller can export the state into a buffer whose size is
+ * defined with this function.
+ *
+ * Return: size of the ahash state
+ */
 static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
 {
 	return crypto_hash_alg_common(tfm)->statesize;
@@ -369,11 +379,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm(
  * crypto_ahash_reqsize() - obtain size of the request data structure
  * @tfm: cipher handle
  *
- * Return the size of the ahash state size. With the crypto_ahash_export
- * function, the caller can export the state into a buffer whose size is
- * defined with this function.
- *
- * Return: size of the ahash state
+ * Return: size of the request data
  */
 static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
 {
@@ -453,7 +459,7 @@ int crypto_ahash_digest(struct ahash_request *req);
  *
  * This function exports the hash state of the ahash_request handle into the
  * caller-allocated output buffer out which must have sufficient size (e.g. by
- * calling crypto_ahash_reqsize).
+ * calling crypto_ahash_statesize()).
  *
  * Return: 0 if the export was successful; < 0 if an error occurred
  */
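The corrected comment matters in practice: export buffers must be sized with crypto_ahash_statesize(), not crypto_ahash_reqsize(). A minimal sketch (helper name is illustrative):

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	/* Snapshot the partial hash state of a request so it can be
	 * resumed later with crypto_ahash_import(). Caller kfree()s
	 * the result.
	 */
	static void *example_export_state(struct ahash_request *req)
	{
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
		void *state;
		int err;

		/* statesize, not reqsize, is the export buffer size */
		state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
		if (!state)
			return ERR_PTR(-ENOMEM);

		err = crypto_ahash_export(req, state);
		if (err) {
			kfree(state);
			return ERR_PTR(err);
		}

		return state;
	}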
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 8735979ed341..e42f7063f245 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -66,7 +66,7 @@ struct skcipher_walk {
 
 	int flags;
 	unsigned int blocksize;
-	unsigned int chunksize;
+	unsigned int stride;
 	unsigned int alignmask;
 };
 
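The renamed stride field is internal to the walker, but it bounds the spans the walk hands out to cipher code. For context, a sketch of the standard walk loop a driver runs (the actual processing step is elided):

	#include <crypto/internal/skcipher.h>

	/* Skeleton of the walk loop a cipher implementation drives; the
	 * walker uses the stride internally to size walk.nbytes on each
	 * iteration.
	 */
	static int example_crypt(struct skcipher_request *req)
	{
		struct skcipher_walk walk;
		int err;

		err = skcipher_walk_virt(&walk, req, false);

		while (walk.nbytes > 0) {
			unsigned int n = walk.nbytes;

			/* cipher-specific: transform n bytes from
			 * walk.src.virt.addr into walk.dst.virt.addr
			 */

			err = skcipher_walk_done(&walk, walk.nbytes - n);
		}

		return err;
	}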
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 750b14f1ada4..562001cb412b 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -115,6 +115,9 @@ struct crypto_skcipher {
  * IV of exactly that size to perform the encrypt or decrypt operation.
  * @chunksize: Equal to the block size except for stream ciphers such as
  *	       CTR where it is set to the underlying block size.
+ * @walksize: Equal to the chunk size except in cases where the algorithm is
+ *	      considerably more efficient if it can operate on multiple chunks
+ *	      in parallel. Should be a multiple of chunksize.
  * @base: Definition of a generic crypto algorithm.
  *
  * All fields except @ivsize are mandatory and must be filled.
@@ -131,6 +134,7 @@ struct skcipher_alg {
 	unsigned int max_keysize;
 	unsigned int ivsize;
 	unsigned int chunksize;
+	unsigned int walksize;
 
 	struct crypto_alg base;
 };
@@ -289,6 +293,19 @@ static inline unsigned int crypto_skcipher_alg_chunksize(
 	return alg->chunksize;
 }
 
+static inline unsigned int crypto_skcipher_alg_walksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->walksize;
+}
+
 /**
  * crypto_skcipher_chunksize() - obtain chunk size
  * @tfm: cipher handle
@@ -307,6 +324,23 @@ static inline unsigned int crypto_skcipher_chunksize(
 }
 
 /**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+ *
+ * In some cases, algorithms can only perform optimally when operating on
+ * multiple blocks in parallel. This is reflected by the walksize, which
+ * must be a multiple of the chunksize (or equal if the concern does not
+ * apply)
+ *
+ * Return: walk size in bytes
+ */
+static inline unsigned int crypto_skcipher_walksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
+/**
  * crypto_skcipher_blocksize() - obtain block size of cipher
  * @tfm: cipher handle
  *
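Putting the walksize pieces together: a hedged sketch of an algorithm advertising a walksize larger than its chunksize, and the new accessor reading it back. All names and values below are made up for illustration:

	#include <crypto/skcipher.h>
	#include <linux/printk.h>

	/* Made-up algorithm using batched walking: a stream cipher whose
	 * SIMD kernel is fastest on four 64-byte keystream blocks at a
	 * time. Operational hooks (.setkey/.encrypt/.decrypt) omitted.
	 */
	static struct skcipher_alg example_alg = {
		.base.cra_name		= "example-stream",
		.base.cra_blocksize	= 1,		/* stream cipher */
		.min_keysize		= 32,
		.max_keysize		= 32,
		.ivsize			= 16,
		.chunksize		= 64,		/* one keystream block */
		.walksize		= 4 * 64,	/* multiple of chunksize */
	};

	static void example_report(struct crypto_skcipher *tfm)
	{
		/* the new accessor; result is a multiple of the chunk size */
		pr_info("walksize: %u bytes\n", crypto_skcipher_walksize(tfm));
	}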