aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/crypto/sha1_s390.c
diff options
context:
space:
mode:
authorJan Glauber <jang@linux.vnet.ibm.com>2008-03-06 06:50:20 -0500
committerHerbert Xu <herbert@gondor.apana.org.au>2008-04-20 22:19:11 -0400
commit604973f1fe41b817c1badb3df2008fe641e50ae6 (patch)
treedca1cf2234a9d8178e45952f4ff0e95f386def5c /arch/s390/crypto/sha1_s390.c
parent607424d8583365418a337aa51e83403c8bd213ed (diff)
[CRYPTO] s390: Generic sha_update and sha_final
The sha_{update|final} functions are similar for every sha variant. Since that is error-prone and redundant, replace these functions by a shared generic implementation for s390. Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/s390/crypto/sha1_s390.c')
-rw-r--r--arch/s390/crypto/sha1_s390.c91
1 file changed, 6 insertions, 85 deletions
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 9cf9eca22747..b3cb5a89b00d 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -29,16 +29,11 @@
29#include <crypto/sha.h> 29#include <crypto/sha.h>
30 30
31#include "crypt_s390.h" 31#include "crypt_s390.h"
32 32#include "sha.h"
33struct s390_sha1_ctx {
34 u64 count; /* message length */
35 u32 state[5];
36 u8 buf[2 * SHA1_BLOCK_SIZE];
37};
38 33
39static void sha1_init(struct crypto_tfm *tfm) 34static void sha1_init(struct crypto_tfm *tfm)
40{ 35{
41 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 36 struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm);
42 37
43 sctx->state[0] = SHA1_H0; 38 sctx->state[0] = SHA1_H0;
44 sctx->state[1] = SHA1_H1; 39 sctx->state[1] = SHA1_H1;
@@ -46,79 +41,7 @@ static void sha1_init(struct crypto_tfm *tfm)
46 sctx->state[3] = SHA1_H3; 41 sctx->state[3] = SHA1_H3;
47 sctx->state[4] = SHA1_H4; 42 sctx->state[4] = SHA1_H4;
48 sctx->count = 0; 43 sctx->count = 0;
49} 44 sctx->func = KIMD_SHA_1;
50
51static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
52 unsigned int len)
53{
54 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
55 unsigned int index;
56 int ret;
57
58 /* how much is already in the buffer? */
59 index = sctx->count & 0x3f;
60
61 sctx->count += len;
62
63 if (index + len < SHA1_BLOCK_SIZE)
64 goto store;
65
66 /* process one stored block */
67 if (index) {
68 memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index);
69 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf,
70 SHA1_BLOCK_SIZE);
71 BUG_ON(ret != SHA1_BLOCK_SIZE);
72 data += SHA1_BLOCK_SIZE - index;
73 len -= SHA1_BLOCK_SIZE - index;
74 }
75
76 /* process as many blocks as possible */
77 if (len >= SHA1_BLOCK_SIZE) {
78 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data,
79 len & ~(SHA1_BLOCK_SIZE - 1));
80 BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1)));
81 data += ret;
82 len -= ret;
83 }
84
85store:
86 /* anything left? */
87 if (len)
88 memcpy(sctx->buf + index , data, len);
89}
90
91/* Add padding and return the message digest. */
92static void sha1_final(struct crypto_tfm *tfm, u8 *out)
93{
94 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
95 u64 bits;
96 unsigned int index, end;
97 int ret;
98
99 /* must perform manual padding */
100 index = sctx->count & 0x3f;
101 end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);
102
103 /* start pad with 1 */
104 sctx->buf[index] = 0x80;
105
106 /* pad with zeros */
107 index++;
108 memset(sctx->buf + index, 0x00, end - index - 8);
109
110 /* append message length */
111 bits = sctx->count * 8;
112 memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
113
114 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
115 BUG_ON(ret != end);
116
117 /* copy digest to out */
118 memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
119
120 /* wipe context */
121 memset(sctx, 0, sizeof *sctx);
122} 45}
123 46
124static struct crypto_alg alg = { 47static struct crypto_alg alg = {
@@ -127,21 +50,20 @@ static struct crypto_alg alg = {
127 .cra_priority = CRYPT_S390_PRIORITY, 50 .cra_priority = CRYPT_S390_PRIORITY,
128 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 51 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
129 .cra_blocksize = SHA1_BLOCK_SIZE, 52 .cra_blocksize = SHA1_BLOCK_SIZE,
130 .cra_ctxsize = sizeof(struct s390_sha1_ctx), 53 .cra_ctxsize = sizeof(struct s390_sha_ctx),
131 .cra_module = THIS_MODULE, 54 .cra_module = THIS_MODULE,
132 .cra_list = LIST_HEAD_INIT(alg.cra_list), 55 .cra_list = LIST_HEAD_INIT(alg.cra_list),
133 .cra_u = { .digest = { 56 .cra_u = { .digest = {
134 .dia_digestsize = SHA1_DIGEST_SIZE, 57 .dia_digestsize = SHA1_DIGEST_SIZE,
135 .dia_init = sha1_init, 58 .dia_init = sha1_init,
136 .dia_update = sha1_update, 59 .dia_update = s390_sha_update,
137 .dia_final = sha1_final } } 60 .dia_final = s390_sha_final } }
138}; 61};
139 62
140static int __init sha1_s390_init(void) 63static int __init sha1_s390_init(void)
141{ 64{
142 if (!crypt_s390_func_available(KIMD_SHA_1)) 65 if (!crypt_s390_func_available(KIMD_SHA_1))
143 return -EOPNOTSUPP; 66 return -EOPNOTSUPP;
144
145 return crypto_register_alg(&alg); 67 return crypto_register_alg(&alg);
146} 68}
147 69
@@ -154,6 +76,5 @@ module_init(sha1_s390_init);
154module_exit(sha1_s390_fini); 76module_exit(sha1_s390_fini);
155 77
156MODULE_ALIAS("sha1"); 78MODULE_ALIAS("sha1");
157
158MODULE_LICENSE("GPL"); 79MODULE_LICENSE("GPL");
159MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); 80MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");