aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNicolas Pitre <nico@cam.org>2005-11-12 19:17:33 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-01-09 17:15:46 -0500
commitfa9b98fdab5b57ecb4dd3d6c2489e262af458c44 (patch)
tree454d374f957985d5931378d86090b6aca5bbc877
parent9d70a6c86cd86e111291bd0d506028ecb9649923 (diff)
[CRYPTO] sha1: Avoid shifting count left and right
This patch avoids shifting the count left and right needlessly for each call to sha1_update(). It instead can be done only once at the end in sha1_final(). Keeping the previous test example (sha1_update() successively called with len=64), a 1.3% performance increase can be observed on i386, or 0.2% on ARM. The generated code is also smaller on ARM. Signed-off-by: Nicolas Pitre <nico@cam.org> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r-- crypto/sha1.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/crypto/sha1.c b/crypto/sha1.c
index 7b1abca29365..8048e2dd3c14 100644
--- a/crypto/sha1.c
+++ b/crypto/sha1.c
@@ -52,8 +52,8 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len)
52 unsigned int partial, done; 52 unsigned int partial, done;
53 const u8 *src; 53 const u8 *src;
54 54
55 partial = (sctx->count >> 3) & 0x3f; 55 partial = sctx->count & 0x3f;
56 sctx->count += len << 3; 56 sctx->count += len;
57 done = 0; 57 done = 0;
58 src = data; 58 src = data;
59 59
@@ -88,10 +88,10 @@ static void sha1_final(void* ctx, u8 *out)
88 __be64 bits; 88 __be64 bits;
89 static const u8 padding[64] = { 0x80, }; 89 static const u8 padding[64] = { 0x80, };
90 90
91 bits = cpu_to_be64(sctx->count); 91 bits = cpu_to_be64(sctx->count << 3);
92 92
93 /* Pad out to 56 mod 64 */ 93 /* Pad out to 56 mod 64 */
94 index = (sctx->count >> 3) & 0x3f; 94 index = sctx->count & 0x3f;
95 padlen = (index < 56) ? (56 - index) : ((64+56) - index); 95 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
96 sha1_update(sctx, padding, padlen); 96 sha1_update(sctx, padding, padlen);
97 97