Diffstat (limited to 'drivers/crypto/padlock-sha.c')
 drivers/crypto/padlock-sha.c | 329 ++++++++++++++++++++++++-----------------
 1 file changed, 166 insertions(+), 163 deletions(-)
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a2c8e8514b63..76cb6b345e7b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -12,81 +12,43 @@
  *
  */
 
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
 #include <crypto/sha.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/errno.h>
-#include <linux/cryptohash.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
 #include <asm/i387.h>
 #include "padlock.h"
 
-#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
-#define SHA256_DEFAULT_FALLBACK	"sha256-generic"
+struct padlock_sha_desc {
+	struct shash_desc fallback;
+};
 
 struct padlock_sha_ctx {
-	char		*data;
-	size_t		used;
-	int		bypass;
-	void (*f_sha_padlock)(const char *in, char *out, int count);
-	struct hash_desc fallback;
+	struct crypto_shash *fallback;
 };
 
-static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
-{
-	return crypto_tfm_ctx(tfm);
-}
-
-/* We'll need aligned address on the stack */
-#define NEAREST_ALIGNED(ptr) \
-	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
-
-static struct crypto_alg sha1_alg, sha256_alg;
-
-static void padlock_sha_bypass(struct crypto_tfm *tfm)
+static int padlock_sha_init(struct shash_desc *desc)
 {
-	if (ctx(tfm)->bypass)
-		return;
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
-	crypto_hash_init(&ctx(tfm)->fallback);
-	if (ctx(tfm)->data && ctx(tfm)->used) {
-		struct scatterlist sg;
-
-		sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
-		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
-	}
-
-	ctx(tfm)->used = 0;
-	ctx(tfm)->bypass = 1;
-}
-
-static void padlock_sha_init(struct crypto_tfm *tfm)
-{
-	ctx(tfm)->used = 0;
-	ctx(tfm)->bypass = 0;
+	dctx->fallback.tfm = ctx->fallback;
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	return crypto_shash_init(&dctx->fallback);
 }
 
-static void padlock_sha_update(struct crypto_tfm *tfm,
-			const uint8_t *data, unsigned int length)
+static int padlock_sha_update(struct shash_desc *desc,
+			      const u8 *data, unsigned int length)
 {
-	/* Our buffer is always one page. */
-	if (unlikely(!ctx(tfm)->bypass &&
-			(ctx(tfm)->used + length > PAGE_SIZE)))
-		padlock_sha_bypass(tfm);
-
-	if (unlikely(ctx(tfm)->bypass)) {
-		struct scatterlist sg;
-		sg_init_one(&sg, (uint8_t *)data, length);
-		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
-		return;
-	}
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 
-	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
-	ctx(tfm)->used += length;
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	return crypto_shash_update(&dctx->fallback, data, length);
 }
 
 static inline void padlock_output_block(uint32_t *src,
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src,
 		*dst++ = swab32(*src++);
 }
 
-static void padlock_do_sha1(const char *in, char *out, int count)
+static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
+			      unsigned int count, u8 *out)
 {
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 *     PadLock microcode needs it that big. */
-	char buf[128+16];
-	char *result = NEAREST_ALIGNED(buf);
+	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct sha1_state state;
+	unsigned int space;
+	unsigned int leftover;
 	int ts_state;
+	int err;
+
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_shash_export(&dctx->fallback, &state);
+	if (err)
+		goto out;
+
+	if (state.count + count > ULONG_MAX)
+		return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
+	space = SHA1_BLOCK_SIZE - leftover;
+	if (space) {
+		if (count > space) {
+			err = crypto_shash_update(&dctx->fallback, in, space) ?:
+			      crypto_shash_export(&dctx->fallback, &state);
+			if (err)
+				goto out;
+			count -= space;
+			in += space;
+		} else {
+			memcpy(state.buffer + leftover, in, count);
+			in = state.buffer;
+			count += leftover;
+			state.count &= ~(SHA1_BLOCK_SIZE - 1);
+		}
+	}
+
+	memcpy(result, &state.state, SHA1_DIGEST_SIZE);
 
-	((uint32_t *)result)[0] = SHA1_H0;
-	((uint32_t *)result)[1] = SHA1_H1;
-	((uint32_t *)result)[2] = SHA1_H2;
-	((uint32_t *)result)[3] = SHA1_H3;
-	((uint32_t *)result)[4] = SHA1_H4;
-
 	/* prevent taking the spurious DNA fault with padlock. */
 	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
-		      : "+S"(in), "+D"(result)
-		      : "c"(count), "a"(0));
+		      : \
+		      : "c"((unsigned long)state.count + count), \
+			"a"((unsigned long)state.count), \
+			"S"(in), "D"(result));
 	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+
+out:
+	return err;
 }
 
-static void padlock_do_sha256(const char *in, char *out, int count)
+static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
+{
+	u8 buf[4];
+
+	return padlock_sha1_finup(desc, buf, 0, out);
+}
+
+static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
+				unsigned int count, u8 *out)
 {
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 *     PadLock microcode needs it that big. */
-	char buf[128+16];
-	char *result = NEAREST_ALIGNED(buf);
+	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct sha256_state state;
+	unsigned int space;
+	unsigned int leftover;
 	int ts_state;
+	int err;
+
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_shash_export(&dctx->fallback, &state);
+	if (err)
+		goto out;
+
+	if (state.count + count > ULONG_MAX)
+		return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
+	space = SHA256_BLOCK_SIZE - leftover;
+	if (space) {
+		if (count > space) {
+			err = crypto_shash_update(&dctx->fallback, in, space) ?:
+			      crypto_shash_export(&dctx->fallback, &state);
+			if (err)
+				goto out;
+			count -= space;
+			in += space;
+		} else {
+			memcpy(state.buf + leftover, in, count);
+			in = state.buf;
+			count += leftover;
+			state.count &= ~(SHA256_BLOCK_SIZE - 1);
+		}
+	}
 
-	((uint32_t *)result)[0] = SHA256_H0;
-	((uint32_t *)result)[1] = SHA256_H1;
-	((uint32_t *)result)[2] = SHA256_H2;
-	((uint32_t *)result)[3] = SHA256_H3;
-	((uint32_t *)result)[4] = SHA256_H4;
-	((uint32_t *)result)[5] = SHA256_H5;
-	((uint32_t *)result)[6] = SHA256_H6;
-	((uint32_t *)result)[7] = SHA256_H7;
+	memcpy(result, &state.state, SHA256_DIGEST_SIZE);
 
 	/* prevent taking the spurious DNA fault with padlock. */
 	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
-		      : "+S"(in), "+D"(result)
-		      : "c"(count), "a"(0));
+		      : \
+		      : "c"((unsigned long)state.count + count), \
+			"a"((unsigned long)state.count), \
+			"S"(in), "D"(result));
 	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+
+out:
+	return err;
 }
 
-static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
+static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
 {
-	if (unlikely(ctx(tfm)->bypass)) {
-		crypto_hash_final(&ctx(tfm)->fallback, out);
-		ctx(tfm)->bypass = 0;
-		return;
-	}
+	u8 buf[4];
 
-	/* Pass the input buffer to PadLock microcode... */
-	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
-
-	ctx(tfm)->used = 0;
+	return padlock_sha256_finup(desc, buf, 0, out);
 }
 
 static int padlock_cra_init(struct crypto_tfm *tfm)
 {
+	struct crypto_shash *hash = __crypto_shash_cast(tfm);
 	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
-	struct crypto_hash *fallback_tfm;
-
-	/* For now we'll allocate one page. This
-	 * could eventually be configurable one day. */
-	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
-	if (!ctx(tfm)->data)
-		return -ENOMEM;
+	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_shash *fallback_tfm;
+	int err = -ENOMEM;
 
 	/* Allocate a fallback and abort if it failed. */
-	fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
-					 CRYPTO_ALG_ASYNC |
-					 CRYPTO_ALG_NEED_FALLBACK);
+	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(fallback_tfm)) {
 		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
 		       fallback_driver_name);
-		free_page((unsigned long)(ctx(tfm)->data));
-		return PTR_ERR(fallback_tfm);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
 	}
 
-	ctx(tfm)->fallback.tfm = fallback_tfm;
+	ctx->fallback = fallback_tfm;
+	hash->descsize += crypto_shash_descsize(fallback_tfm);
 	return 0;
-}
-
-static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
-{
-	ctx(tfm)->f_sha_padlock = padlock_do_sha1;
 
-	return padlock_cra_init(tfm);
-}
-
-static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
-{
-	ctx(tfm)->f_sha_padlock = padlock_do_sha256;
-
-	return padlock_cra_init(tfm);
+out:
+	return err;
 }
 
 static void padlock_cra_exit(struct crypto_tfm *tfm)
 {
-	if (ctx(tfm)->data) {
-		free_page((unsigned long)(ctx(tfm)->data));
-		ctx(tfm)->data = NULL;
-	}
+	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypto_free_hash(ctx(tfm)->fallback.tfm);
-	ctx(tfm)->fallback.tfm = NULL;
+	crypto_free_shash(ctx->fallback);
 }
 
-static struct crypto_alg sha1_alg = {
-	.cra_name		=	"sha1",
-	.cra_driver_name	=	"sha1-padlock",
-	.cra_priority		=	PADLOCK_CRA_PRIORITY,
-	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
-					CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize		=	SHA1_BLOCK_SIZE,
-	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
-	.cra_module		=	THIS_MODULE,
-	.cra_list		=	LIST_HEAD_INIT(sha1_alg.cra_list),
-	.cra_init		=	padlock_sha1_cra_init,
-	.cra_exit		=	padlock_cra_exit,
-	.cra_u			=	{
-		.digest = {
-			.dia_digestsize	=	SHA1_DIGEST_SIZE,
-			.dia_init	=	padlock_sha_init,
-			.dia_update	=	padlock_sha_update,
-			.dia_final	=	padlock_sha_final,
-		}
+static struct shash_alg sha1_alg = {
+	.digestsize	=	SHA1_DIGEST_SIZE,
+	.init		=	padlock_sha_init,
+	.update		=	padlock_sha_update,
+	.finup		=	padlock_sha1_finup,
+	.final		=	padlock_sha1_final,
+	.descsize	=	sizeof(struct padlock_sha_desc),
+	.base		=	{
+		.cra_name		=	"sha1",
+		.cra_driver_name	=	"sha1-padlock",
+		.cra_priority		=	PADLOCK_CRA_PRIORITY,
+		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		=	SHA1_BLOCK_SIZE,
+		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
+		.cra_module		=	THIS_MODULE,
+		.cra_init		=	padlock_cra_init,
+		.cra_exit		=	padlock_cra_exit,
 	}
 };
 
-static struct crypto_alg sha256_alg = {
-	.cra_name		=	"sha256",
-	.cra_driver_name	=	"sha256-padlock",
-	.cra_priority		=	PADLOCK_CRA_PRIORITY,
-	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
-					CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize		=	SHA256_BLOCK_SIZE,
-	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
-	.cra_module		=	THIS_MODULE,
-	.cra_list		=	LIST_HEAD_INIT(sha256_alg.cra_list),
-	.cra_init		=	padlock_sha256_cra_init,
-	.cra_exit		=	padlock_cra_exit,
-	.cra_u			=	{
-		.digest = {
-			.dia_digestsize	=	SHA256_DIGEST_SIZE,
-			.dia_init	=	padlock_sha_init,
-			.dia_update	=	padlock_sha_update,
-			.dia_final	=	padlock_sha_final,
-		}
+static struct shash_alg sha256_alg = {
+	.digestsize	=	SHA256_DIGEST_SIZE,
+	.init		=	padlock_sha_init,
+	.update		=	padlock_sha_update,
+	.finup		=	padlock_sha256_finup,
+	.final		=	padlock_sha256_final,
+	.descsize	=	sizeof(struct padlock_sha_desc),
+	.base		=	{
+		.cra_name		=	"sha256",
+		.cra_driver_name	=	"sha256-padlock",
+		.cra_priority		=	PADLOCK_CRA_PRIORITY,
+		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		=	SHA256_BLOCK_SIZE,
+		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
+		.cra_module		=	THIS_MODULE,
+		.cra_init		=	padlock_cra_init,
+		.cra_exit		=	padlock_cra_exit,
 	}
 };
 
@@ -272,11 +275,11 @@ static int __init padlock_init(void)
 		return -ENODEV;
 	}
 
-	rc = crypto_register_alg(&sha1_alg);
+	rc = crypto_register_shash(&sha1_alg);
 	if (rc)
 		goto out;
 
-	rc = crypto_register_alg(&sha256_alg);
+	rc = crypto_register_shash(&sha256_alg);
 	if (rc)
 		goto out_unreg1;
 
@@ -285,7 +288,7 @@ static int __init padlock_init(void)
 	return 0;
 
 out_unreg1:
-	crypto_unregister_alg(&sha1_alg);
+	crypto_unregister_shash(&sha1_alg);
 out:
 	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
 	return rc;
@@ -293,8 +296,8 @@ out:
 
 static void __exit padlock_fini(void)
 {
-	crypto_unregister_alg(&sha1_alg);
-	crypto_unregister_alg(&sha256_alg);
+	crypto_unregister_shash(&sha1_alg);
+	crypto_unregister_shash(&sha256_alg);
 }
 
 module_init(padlock_init);
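
For context: the shash interface this patch moves padlock-sha onto is driven from other kernel code roughly as below. This is a minimal sketch, not part of the commit — the sha1_demo_* names are invented for illustration — while the crypto_* calls are the standard synchronous-hash API of this kernel generation (shash_desc still carried a flags field then, which the patch itself forwards to its fallback via desc->flags).

/* Hypothetical demo module; only the crypto_* API calls are real. */
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init sha1_demo_init(void)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	u8 digest[SHA1_DIGEST_SIZE];
	static const u8 msg[] = "abc";
	int err;

	/* Asking for "sha1" by its generic name picks the registered
	 * implementation with the highest cra_priority; on a VIA CPU
	 * with PadLock that is "sha1-padlock". */
	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Callers must supply crypto_shash_descsize() bytes of request
	 * state after the descriptor; for padlock that now includes the
	 * embedded fallback descriptor added in padlock_cra_init(). */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* digest() is init() + update() + final() in one call;
	 * sizeof(msg) - 1 skips the terminating NUL. */
	err = crypto_shash_digest(desc, msg, sizeof(msg) - 1, digest);
	if (!err)
		print_hex_dump_bytes("sha1: ", DUMP_PREFIX_NONE,
				     digest, sizeof(digest));

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}

static void __exit sha1_demo_exit(void)
{
}

module_init(sha1_demo_init);
module_exit(sha1_demo_exit);
MODULE_LICENSE("GPL");

Note that CRYPTO_ALG_NEED_FALLBACK does not make the core fall back transparently; the driver keeps its own fallback tfm (allocated in padlock_cra_init above) and punts to it itself, e.g. when the running count would overflow what the PadLock unit can finalize.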