author		Herbert Xu <herbert@gondor.apana.org.au>	2009-07-10 05:26:44 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2009-07-11 06:23:35 -0400
commit		7d024608265eb815ae4ce1e5da097ec9d800dda4 (patch)
tree		d6c4418ecc18718ffdbd406116c8291b4ec97e42 /drivers
parent		f63559bef380a95093408691c1081f07da755b74 (diff)
crypto: padlock - Use shash fallback for sha
This patch changes the padlock sha fallback from the hash interface to shash.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/crypto/padlock-sha.c	82
1 file changed, 52 insertions, 30 deletions
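For context, the shash interface the patch converts to is the synchronous hash API: it takes flat buffers rather than scatterlists, and per-request state lives in a struct shash_desc whose size is queried with crypto_shash_descsize(). The sketch below is illustrative only; it is not part of this commit, and the function name example_shash_digest is made up. It shows the same allocate/init/update/final sequence the driver now uses for its fallback.

/* Illustrative sketch only -- not part of this commit. The function name
 * example_shash_digest() is invented; the crypto_shash_* calls are the
 * shash API that the patch switches to. */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_shash_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	/* Ask for any "sha1" implementation; a driver requesting a software
	 * fallback would pass its cra_name and CRYPTO_ALG_NEED_FALLBACK. */
	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The descriptor carries per-request state; its size depends on the
	 * chosen implementation, hence sizeof(*desc) + crypto_shash_descsize(). */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, data, len);
	if (!err)
		err = crypto_shash_final(desc, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}

padlock_cra_init() below follows the same pattern, except that it requests the fallback by cra_name with CRYPTO_ALG_NEED_FALLBACK and keeps the descriptor in the tfm context so the bypass path can reuse it.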
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a2c8e8514b63..868da54b1850 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -12,28 +12,24 @@
  *
  */
 
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
 #include <crypto/sha.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/errno.h>
-#include <linux/cryptohash.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
 #include <asm/i387.h>
 #include "padlock.h"
 
-#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
-#define SHA256_DEFAULT_FALLBACK	"sha256-generic"
-
 struct padlock_sha_ctx {
 	char *data;
 	size_t used;
 	int bypass;
 	void (*f_sha_padlock)(const char *in, char *out, int count);
-	struct hash_desc fallback;
+	struct shash_desc *fallback;
 };
 
 static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
@@ -47,21 +43,26 @@ static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
 
 static struct crypto_alg sha1_alg, sha256_alg;
 
-static void padlock_sha_bypass(struct crypto_tfm *tfm)
+static int padlock_sha_bypass(struct crypto_tfm *tfm)
 {
+	int err = 0;
+
 	if (ctx(tfm)->bypass)
-		return;
+		goto out;
+
+	err = crypto_shash_init(ctx(tfm)->fallback);
+	if (err)
+		goto out;
 
-	crypto_hash_init(&ctx(tfm)->fallback);
-	if (ctx(tfm)->data && ctx(tfm)->used) {
-		struct scatterlist sg;
-
-		sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
-		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
-	}
+	if (ctx(tfm)->data && ctx(tfm)->used)
+		err = crypto_shash_update(ctx(tfm)->fallback, ctx(tfm)->data,
+					  ctx(tfm)->used);
 
 	ctx(tfm)->used = 0;
 	ctx(tfm)->bypass = 1;
+
+out:
+	return err;
 }
 
 static void padlock_sha_init(struct crypto_tfm *tfm)
@@ -73,15 +74,18 @@ static void padlock_sha_init(struct crypto_tfm *tfm)
 static void padlock_sha_update(struct crypto_tfm *tfm,
 			const uint8_t *data, unsigned int length)
 {
+	int err;
+
 	/* Our buffer is always one page. */
 	if (unlikely(!ctx(tfm)->bypass &&
-			(ctx(tfm)->used + length > PAGE_SIZE)))
-		padlock_sha_bypass(tfm);
+			(ctx(tfm)->used + length > PAGE_SIZE))) {
+		err = padlock_sha_bypass(tfm);
+		BUG_ON(err);
+	}
 
 	if (unlikely(ctx(tfm)->bypass)) {
-		struct scatterlist sg;
-		sg_init_one(&sg, (uint8_t *)data, length);
-		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
+		err = crypto_shash_update(ctx(tfm)->fallback, data, length);
+		BUG_ON(err);
 		return;
 	}
 
@@ -151,8 +155,11 @@ static void padlock_do_sha256(const char *in, char *out, int count)
 
 static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
 {
+	int err;
+
 	if (unlikely(ctx(tfm)->bypass)) {
-		crypto_hash_final(&ctx(tfm)->fallback, out);
+		err = crypto_shash_final(ctx(tfm)->fallback, out);
+		BUG_ON(err);
 		ctx(tfm)->bypass = 0;
 		return;
 	}
@@ -166,27 +173,41 @@ static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
 static int padlock_cra_init(struct crypto_tfm *tfm)
 {
 	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
-	struct crypto_hash *fallback_tfm;
+	struct crypto_shash *fallback_tfm;
+	int err = -ENOMEM;
 
 	/* For now we'll allocate one page. This
 	 * could eventually be configurable one day. */
 	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
 	if (!ctx(tfm)->data)
-		return -ENOMEM;
+		goto out;
 
 	/* Allocate a fallback and abort if it failed. */
-	fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
-					 CRYPTO_ALG_ASYNC |
-					 CRYPTO_ALG_NEED_FALLBACK);
+	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(fallback_tfm)) {
 		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
 		       fallback_driver_name);
-		free_page((unsigned long)(ctx(tfm)->data));
-		return PTR_ERR(fallback_tfm);
+		err = PTR_ERR(fallback_tfm);
+		goto out_free_page;
 	}
 
-	ctx(tfm)->fallback.tfm = fallback_tfm;
+	ctx(tfm)->fallback = kmalloc(sizeof(struct shash_desc) +
+				     crypto_shash_descsize(fallback_tfm),
+				     GFP_KERNEL);
+	if (!ctx(tfm)->fallback)
+		goto out_free_tfm;
+
+	ctx(tfm)->fallback->tfm = fallback_tfm;
+	ctx(tfm)->fallback->flags = 0;
 	return 0;
+
+out_free_tfm:
+	crypto_free_shash(fallback_tfm);
+out_free_page:
+	free_page((unsigned long)(ctx(tfm)->data));
+out:
+	return err;
 }
 
 static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
@@ -210,8 +231,9 @@ static void padlock_cra_exit(struct crypto_tfm *tfm)
 		ctx(tfm)->data = NULL;
 	}
 
-	crypto_free_hash(ctx(tfm)->fallback.tfm);
-	ctx(tfm)->fallback.tfm = NULL;
+	crypto_free_shash(ctx(tfm)->fallback->tfm);
+
+	kzfree(ctx(tfm)->fallback);
 }
 
 static struct crypto_alg sha1_alg = {