author	Eric Biggers <ebiggers@google.com>	2019-04-14 20:37:09 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2019-04-25 03:38:12 -0400
commit	877b5691f27a1aec0d9b53095a323e45c30069e2 (patch)
tree	59eba93e8d253fb0e12a0a2040de99e96e873933 /crypto/shash.c
parent	75f2222832e0fecba7a45ca6ac07ea895ea1e046 (diff)
crypto: shash - remove shash_desc::flags
The flags field in 'struct shash_desc' never actually does anything. The only ostensibly supported flag is CRYPTO_TFM_REQ_MAY_SLEEP. However, no shash algorithm ever sleeps, making this flag a no-op.

With this being the case, inevitably some users who can't sleep wrongly pass MAY_SLEEP. These would all need to be fixed if any shash algorithm actually started sleeping. For example, the shash_ahash_*() functions, which wrap a shash algorithm with the ahash API, pass through MAY_SLEEP from the ahash API to the shash API. However, the shash functions are called under kmap_atomic(), so actually they're assumed to never sleep.

Even if it turns out that some users do need preemption points while hashing large buffers, we could easily provide a helper function crypto_shash_update_large() which divides the data into smaller chunks and calls crypto_shash_update() and cond_resched() for each chunk. It's not necessary to have a flag in 'struct shash_desc', nor is it necessary to make individual shash algorithms aware of this at all.

Therefore, remove shash_desc::flags, and document that the crypto_shash_*() functions can be called from any context.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
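The crypto_shash_update_large() helper mentioned above is only suggested by the commit message; it is not added by this patch and does not exist in the crypto API. A minimal sketch of what such a helper could look like follows; the function name, the 4096-byte chunk size, and the use of cond_resched() as the preemption point are illustrative assumptions.

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/*
 * Hypothetical helper sketched from the commit message above: hash a
 * large buffer in fixed-size chunks and insert a preemption point
 * between chunks.  Not part of this patch or of the kernel crypto API.
 */
static int crypto_shash_update_large(struct shash_desc *desc,
				     const u8 *data, unsigned int len)
{
	while (len) {
		unsigned int n = min_t(unsigned int, len, 4096);
		int err = crypto_shash_update(desc, data, n);

		if (err)
			return err;
		data += n;
		len -= n;
		cond_resched();		/* allow scheduling between chunks */
	}
	return 0;
}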
Diffstat (limited to 'crypto/shash.c')
-rw-r--r--	crypto/shash.c	4
1 file changed, 0 insertions, 4 deletions
diff --git a/crypto/shash.c b/crypto/shash.c
index 599468478f7b..e55c1f558bc3 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -238,7 +238,6 @@ static int shash_async_init(struct ahash_request *req)
 	struct shash_desc *desc = ahash_request_ctx(req);
 
 	desc->tfm = *ctx;
-	desc->flags = req->base.flags;
 
 	return crypto_shash_init(desc);
 }
@@ -293,7 +292,6 @@ static int shash_async_finup(struct ahash_request *req)
 	struct shash_desc *desc = ahash_request_ctx(req);
 
 	desc->tfm = *ctx;
-	desc->flags = req->base.flags;
 
 	return shash_ahash_finup(req, desc);
 }
@@ -328,7 +326,6 @@ static int shash_async_digest(struct ahash_request *req)
 	struct shash_desc *desc = ahash_request_ctx(req);
 
 	desc->tfm = *ctx;
-	desc->flags = req->base.flags;
 
 	return shash_ahash_digest(req, desc);
 }
@@ -344,7 +341,6 @@ static int shash_async_import(struct ahash_request *req, const void *in)
 	struct shash_desc *desc = ahash_request_ctx(req);
 
 	desc->tfm = *ctx;
-	desc->flags = req->base.flags;
 
 	return crypto_shash_import(desc, in);
 }
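For context, after this change a synchronous-hash caller only initializes ->tfm in the descriptor; there is no flags assignment, and per the updated documentation the crypto_shash_*() calls may be made from any context. Below is a minimal sketch of that caller pattern; it is not part of this patch, and the function name and the choice of the "sha256" algorithm are illustrative only.

#include <crypto/hash.h>
#include <linux/err.h>

/*
 * Illustrative shash user after the removal of shash_desc::flags.
 * 'out' must have room for crypto_shash_digestsize(tfm) bytes.
 */
static int example_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;	/* no desc->flags to set anymore */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}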