author     Herbert Xu <herbert@gondor.apana.org.au>    2009-07-11 06:16:16 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>    2009-07-14 00:58:02 -0400
commit     bbbee4679afd7c29a58087f133ffe241597f3144
tree       b49da1e7b303948fc1d39580df375071c1703494  /drivers/crypto
parent     113adefc73c291f93f875fe515a46d8f76252fff
crypto: padlock - Switch sha to shash
This patch converts the padlock-sha implementation to shash.
In doing so, the existing mechanism of storing the data until
final is no longer viable, as we do not have a way of allocating
data in crypto_shash_init and then reliably freeing it.
This is just as well because a better way of handling the problem
is to hash everything but the last chunk using normal sha code
and then provide the intermediate result to the padlock device.
This is good enough because the primary application of padlock-sha
is IPsec and there the data is laid out in the form of an hmac
header followed by the rest of the packet. In essence we can
provide all the data to the padlock as the hmac header only needs
to be hashed once.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
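
For reference, after this conversion the driver is consumed through the
generic shash interface. The snippet below is a minimal sketch, not part
of the patch: the function sha1_digest_example and its buffers are
illustrative, while the crypto_* calls are the standard kernel shash API
of this era. It also shows why the per-tfm descsize matters, since for
padlock-sha it now has to cover the fallback's descriptor as well.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int sha1_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	/* "sha1" resolves to the highest-priority provider, e.g. sha1-padlock. */
	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Descriptor size is per-tfm; for padlock-sha it now also covers
	 * the state of the fallback shash. */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, data, len) ?:
	      crypto_shash_final(desc, out);

	kzfree(desc);
	crypto_free_shash(tfm);
	return err;
}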
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig       |   2
-rw-r--r--  drivers/crypto/padlock-sha.c | 333
2 files changed, 156 insertions, 179 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692372bf..1bb4b7fe4585 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -13,7 +13,6 @@ if CRYPTO_HW
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
 	depends on X86 && !UML
-	select CRYPTO_ALGAPI
 	help
 	  Some VIA processors come with an integrated crypto engine
 	  (so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES
 config CRYPTO_DEV_PADLOCK_SHA
 	tristate "PadLock driver for SHA1 and SHA256 algorithms"
 	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_HASH
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	help
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 868da54b1850..fb6e6c343148 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -24,73 +24,31 @@
 #include <asm/i387.h>
 #include "padlock.h"
 
-struct padlock_sha_ctx {
-	char *data;
-	size_t used;
-	int bypass;
-	void (*f_sha_padlock)(const char *in, char *out, int count);
-	struct shash_desc *fallback;
+struct padlock_sha_desc {
+	struct shash_desc fallback;
 };
 
-static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
-{
-	return crypto_tfm_ctx(tfm);
-}
-
-/* We'll need aligned address on the stack */
-#define NEAREST_ALIGNED(ptr) \
-	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
-
-static struct crypto_alg sha1_alg, sha256_alg;
+struct padlock_sha_ctx {
+	struct crypto_shash *fallback;
+};
 
-static int padlock_sha_bypass(struct crypto_tfm *tfm)
+static int padlock_sha_init(struct shash_desc *desc)
 {
-	int err = 0;
-
-	if (ctx(tfm)->bypass)
-		goto out;
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
-	err = crypto_shash_init(ctx(tfm)->fallback);
-	if (err)
-		goto out;
-
-	if (ctx(tfm)->data && ctx(tfm)->used)
-		err = crypto_shash_update(ctx(tfm)->fallback, ctx(tfm)->data,
-					  ctx(tfm)->used);
-
-	ctx(tfm)->used = 0;
-	ctx(tfm)->bypass = 1;
-
-out:
-	return err;
+	dctx->fallback.tfm = ctx->fallback;
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	return crypto_shash_init(&dctx->fallback);
 }
 
-static void padlock_sha_init(struct crypto_tfm *tfm)
+static int padlock_sha_update(struct shash_desc *desc,
+			      const u8 *data, unsigned int length)
 {
-	ctx(tfm)->used = 0;
-	ctx(tfm)->bypass = 0;
-}
-
-static void padlock_sha_update(struct crypto_tfm *tfm,
-			const uint8_t *data, unsigned int length)
-{
-	int err;
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 
-	/* Our buffer is always one page. */
-	if (unlikely(!ctx(tfm)->bypass &&
-			(ctx(tfm)->used + length > PAGE_SIZE))) {
-		err = padlock_sha_bypass(tfm);
-		BUG_ON(err);
-	}
-
-	if (unlikely(ctx(tfm)->bypass)) {
-		err = crypto_shash_update(ctx(tfm)->fallback, data, length);
-		BUG_ON(err);
-		return;
-	}
-
-	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
-	ctx(tfm)->used += length;
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	return crypto_shash_update(&dctx->fallback, data, length);
 }
 
 static inline void padlock_output_block(uint32_t *src,
@@ -100,88 +58,138 @@ static inline void padlock_output_block(uint32_t *src,
 		*dst++ = swab32(*src++);
 }
 
-static void padlock_do_sha1(const char *in, char *out, int count)
+static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
+			      unsigned int count, u8 *out)
 {
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char buf[128+16];
-	char *result = NEAREST_ALIGNED(buf);
+	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct sha1_state state;
+	unsigned int space;
+	unsigned int leftover;
 	int ts_state;
+	int err;
+
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_shash_export(&dctx->fallback, &state);
+	if (err)
+		goto out;
+
+	if (state.count + count > ULONG_MAX)
+		return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
+	space = SHA1_BLOCK_SIZE - leftover;
+	if (space) {
+		if (count > space) {
+			err = crypto_shash_update(&dctx->fallback, in, space) ?:
+			      crypto_shash_export(&dctx->fallback, &state);
+			if (err)
+				goto out;
+			count -= space;
+			in += space;
+		} else {
+			memcpy(state.buffer + leftover, in, count);
+			in = state.buffer;
+			count += leftover;
+		}
+	}
+
+	memcpy(result, &state.state, SHA1_DIGEST_SIZE);
 
-	((uint32_t *)result)[0] = SHA1_H0;
-	((uint32_t *)result)[1] = SHA1_H1;
-	((uint32_t *)result)[2] = SHA1_H2;
-	((uint32_t *)result)[3] = SHA1_H3;
-	((uint32_t *)result)[4] = SHA1_H4;
-
 	/* prevent taking the spurious DNA fault with padlock. */
 	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
-		      : "+S"(in), "+D"(result)
-		      : "c"(count), "a"(0));
+		      : \
+		      : "c"(state.count + count), "a"(state.count), \
+			"S"(in), "D"(result));
 	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+
+out:
+	return err;
 }
 
-static void padlock_do_sha256(const char *in, char *out, int count)
+static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
+{
+	u8 buf[4];
+
+	return padlock_sha1_finup(desc, buf, 0, out);
+}
+
+static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
+				unsigned int count, u8 *out)
 {
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char buf[128+16];
-	char *result = NEAREST_ALIGNED(buf);
+	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct sha256_state state;
+	unsigned int space;
+	unsigned int leftover;
 	int ts_state;
+	int err;
 
-	((uint32_t *)result)[0] = SHA256_H0;
-	((uint32_t *)result)[1] = SHA256_H1;
-	((uint32_t *)result)[2] = SHA256_H2;
-	((uint32_t *)result)[3] = SHA256_H3;
-	((uint32_t *)result)[4] = SHA256_H4;
-	((uint32_t *)result)[5] = SHA256_H5;
-	((uint32_t *)result)[6] = SHA256_H6;
-	((uint32_t *)result)[7] = SHA256_H7;
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_shash_export(&dctx->fallback, &state);
+	if (err)
+		goto out;
+
+	if (state.count + count > ULONG_MAX)
+		return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
+	space = SHA256_BLOCK_SIZE - leftover;
+	if (space) {
+		if (count > space) {
+			err = crypto_shash_update(&dctx->fallback, in, space) ?:
+			      crypto_shash_export(&dctx->fallback, &state);
+			if (err)
+				goto out;
+			count -= space;
+			in += space;
+		} else {
+			memcpy(state.buf + leftover, in, count);
+			in = state.buf;
+			count += leftover;
+		}
+	}
+
+	memcpy(result, &state.state, SHA256_DIGEST_SIZE);
 
 	/* prevent taking the spurious DNA fault with padlock. */
 	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
-		      : "+S"(in), "+D"(result)
-		      : "c"(count), "a"(0));
+		      : \
+		      : "c"(state.count + count), "a"(state.count), \
+			"S"(in), "D"(result));
 	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+
+out:
+	return err;
 }
 
-static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
+static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
 {
-	int err;
-
-	if (unlikely(ctx(tfm)->bypass)) {
-		err = crypto_shash_final(ctx(tfm)->fallback, out);
-		BUG_ON(err);
-		ctx(tfm)->bypass = 0;
-		return;
-	}
+	u8 buf[4];
 
-	/* Pass the input buffer to PadLock microcode... */
-	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
-
-	ctx(tfm)->used = 0;
+	return padlock_sha256_finup(desc, buf, 0, out);
 }
 
 static int padlock_cra_init(struct crypto_tfm *tfm)
 {
+	struct crypto_shash *hash = __crypto_shash_cast(tfm);
 	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct crypto_shash *fallback_tfm;
 	int err = -ENOMEM;
 
-	/* For now we'll allocate one page. This
-	 * could eventually be configurable one day. */
-	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
-	if (!ctx(tfm)->data)
-		goto out;
-
 	/* Allocate a fallback and abort if it failed. */
 	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
 					  CRYPTO_ALG_NEED_FALLBACK);
@@ -189,94 +197,63 @@ static int padlock_cra_init(struct crypto_tfm *tfm)
 		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
 		       fallback_driver_name);
 		err = PTR_ERR(fallback_tfm);
-		goto out_free_page;
+		goto out;
 	}
 
-	ctx(tfm)->fallback = kmalloc(sizeof(struct shash_desc) +
-				     crypto_shash_descsize(fallback_tfm),
-				     GFP_KERNEL);
-	if (!ctx(tfm)->fallback)
-		goto out_free_tfm;
-
-	ctx(tfm)->fallback->tfm = fallback_tfm;
-	ctx(tfm)->fallback->flags = 0;
+	ctx->fallback = fallback_tfm;
+	hash->descsize += crypto_shash_descsize(fallback_tfm);
 	return 0;
 
-out_free_tfm:
-	crypto_free_shash(fallback_tfm);
-out_free_page:
-	free_page((unsigned long)(ctx(tfm)->data));
 out:
 	return err;
 }
 
-static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
-{
-	ctx(tfm)->f_sha_padlock = padlock_do_sha1;
-
-	return padlock_cra_init(tfm);
-}
-
-static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
-{
-	ctx(tfm)->f_sha_padlock = padlock_do_sha256;
-
-	return padlock_cra_init(tfm);
-}
-
 static void padlock_cra_exit(struct crypto_tfm *tfm)
 {
-	if (ctx(tfm)->data) {
-		free_page((unsigned long)(ctx(tfm)->data));
-		ctx(tfm)->data = NULL;
-	}
-
-	crypto_free_shash(ctx(tfm)->fallback->tfm);
+	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	kzfree(ctx(tfm)->fallback);
+	crypto_free_shash(ctx->fallback);
 }
 
-static struct crypto_alg sha1_alg = {
-	.cra_name = "sha1",
-	.cra_driver_name = "sha1-padlock",
-	.cra_priority = PADLOCK_CRA_PRIORITY,
-	.cra_flags = CRYPTO_ALG_TYPE_DIGEST |
-		     CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize = SHA1_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct padlock_sha_ctx),
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
-	.cra_init = padlock_sha1_cra_init,
-	.cra_exit = padlock_cra_exit,
-	.cra_u = {
-		.digest = {
-			.dia_digestsize = SHA1_DIGEST_SIZE,
-			.dia_init = padlock_sha_init,
-			.dia_update = padlock_sha_update,
-			.dia_final = padlock_sha_final,
-		}
+static struct shash_alg sha1_alg = {
+	.digestsize = SHA1_DIGEST_SIZE,
+	.init = padlock_sha_init,
+	.update = padlock_sha_update,
+	.finup = padlock_sha1_finup,
+	.final = padlock_sha1_final,
+	.descsize = sizeof(struct padlock_sha_desc),
+	.base = {
+		.cra_name = "sha1",
+		.cra_driver_name = "sha1-padlock",
+		.cra_priority = PADLOCK_CRA_PRIORITY,
+		.cra_flags = CRYPTO_ALG_TYPE_SHASH |
+			     CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize = SHA1_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct padlock_sha_ctx),
+		.cra_module = THIS_MODULE,
+		.cra_init = padlock_cra_init,
+		.cra_exit = padlock_cra_exit,
 	}
 };
 
-static struct crypto_alg sha256_alg = {
-	.cra_name = "sha256",
-	.cra_driver_name = "sha256-padlock",
-	.cra_priority = PADLOCK_CRA_PRIORITY,
-	.cra_flags = CRYPTO_ALG_TYPE_DIGEST |
-		     CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize = SHA256_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct padlock_sha_ctx),
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
-	.cra_init = padlock_sha256_cra_init,
-	.cra_exit = padlock_cra_exit,
-	.cra_u = {
-		.digest = {
-			.dia_digestsize = SHA256_DIGEST_SIZE,
-			.dia_init = padlock_sha_init,
-			.dia_update = padlock_sha_update,
-			.dia_final = padlock_sha_final,
-		}
+static struct shash_alg sha256_alg = {
+	.digestsize = SHA256_DIGEST_SIZE,
+	.init = padlock_sha_init,
+	.update = padlock_sha_update,
+	.finup = padlock_sha256_finup,
+	.final = padlock_sha256_final,
+	.descsize = sizeof(struct padlock_sha_desc),
+	.base = {
+		.cra_name = "sha256",
+		.cra_driver_name = "sha256-padlock",
+		.cra_priority = PADLOCK_CRA_PRIORITY,
+		.cra_flags = CRYPTO_ALG_TYPE_SHASH |
+			     CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize = SHA256_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct padlock_sha_ctx),
+		.cra_module = THIS_MODULE,
+		.cra_init = padlock_cra_init,
+		.cra_exit = padlock_cra_exit,
 	}
 };
 
@@ -294,11 +271,11 @@ static int __init padlock_init(void)
 		return -ENODEV;
 	}
 
-	rc = crypto_register_alg(&sha1_alg);
+	rc = crypto_register_shash(&sha1_alg);
 	if (rc)
 		goto out;
 
-	rc = crypto_register_alg(&sha256_alg);
+	rc = crypto_register_shash(&sha256_alg);
 	if (rc)
 		goto out_unreg1;
 
@@ -307,7 +284,7 @@ static int __init padlock_init(void)
 	return 0;
 
 out_unreg1:
-	crypto_unregister_alg(&sha1_alg);
+	crypto_unregister_shash(&sha1_alg);
 out:
 	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
 	return rc;
@@ -315,8 +292,8 @@ out:
 
 static void __exit padlock_fini(void)
 {
-	crypto_unregister_alg(&sha1_alg);
-	crypto_unregister_alg(&sha256_alg);
+	crypto_unregister_shash(&sha1_alg);
+	crypto_unregister_shash(&sha256_alg);
 }
 
 module_init(padlock_init);