diff options
author | Herbert Xu <herbert@gondor.apana.org.au> | 2005-07-06 16:53:29 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2005-07-06 16:53:29 -0400 |
commit | fbdae9f3e7fb57c07cb0d973f113eb25da2e8ff2 (patch) | |
tree | b20909c92c2d48ab449343967b1c365732d7e4ff /crypto/cipher.c | |
parent | 176c3652c544b6f8d4bb1984c58c10080f45dbf0 (diff) |
[CRYPTO] Ensure cit_iv is aligned correctly
This patch ensures that cit_iv is aligned according to cra_alignmask
by allocating it as part of the tfm structure. As a side effect the
crypto layer will also guarantee that the tfm ctx area has enough space
to be aligned by cra_alignmask. This allows us to remove the extra
space reservation from the Padlock driver.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'crypto/cipher.c')
-rw-r--r-- | crypto/cipher.c | 15 |
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/crypto/cipher.c b/crypto/cipher.c index 85eb12f8e564..d3295ce14a57 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c | |||
@@ -41,7 +41,7 @@ static unsigned int crypt_slow(const struct cipher_desc *desc, | |||
41 | struct scatter_walk *in, | 41 | struct scatter_walk *in, |
42 | struct scatter_walk *out, unsigned int bsize) | 42 | struct scatter_walk *out, unsigned int bsize) |
43 | { | 43 | { |
44 | unsigned int alignmask = desc->tfm->__crt_alg->cra_alignmask; | 44 | unsigned int alignmask = crypto_tfm_alg_alignmask(desc->tfm); |
45 | u8 buffer[bsize * 2 + alignmask]; | 45 | u8 buffer[bsize * 2 + alignmask]; |
46 | u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | 46 | u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); |
47 | u8 *dst = src + bsize; | 47 | u8 *dst = src + bsize; |
@@ -98,7 +98,7 @@ static int crypt(const struct cipher_desc *desc, | |||
98 | struct scatter_walk walk_in, walk_out; | 98 | struct scatter_walk walk_in, walk_out; |
99 | struct crypto_tfm *tfm = desc->tfm; | 99 | struct crypto_tfm *tfm = desc->tfm; |
100 | const unsigned int bsize = crypto_tfm_alg_blocksize(tfm); | 100 | const unsigned int bsize = crypto_tfm_alg_blocksize(tfm); |
101 | unsigned int alignmask = tfm->__crt_alg->cra_alignmask; | 101 | unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); |
102 | unsigned long buffer = 0; | 102 | unsigned long buffer = 0; |
103 | 103 | ||
104 | if (!nbytes) | 104 | if (!nbytes) |
@@ -399,6 +399,8 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm) | |||
399 | } | 399 | } |
400 | 400 | ||
401 | if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) { | 401 | if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) { |
402 | unsigned int align; | ||
403 | unsigned long addr; | ||
402 | 404 | ||
403 | switch (crypto_tfm_alg_blocksize(tfm)) { | 405 | switch (crypto_tfm_alg_blocksize(tfm)) { |
404 | case 8: | 406 | case 8: |
@@ -418,9 +420,11 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm) | |||
418 | } | 420 | } |
419 | 421 | ||
420 | ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm); | 422 | ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm); |
421 | ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL); | 423 | align = crypto_tfm_alg_alignmask(tfm) + 1; |
422 | if (ops->cit_iv == NULL) | 424 | addr = (unsigned long)crypto_tfm_ctx(tfm); |
423 | ret = -ENOMEM; | 425 | addr = ALIGN(addr, align); |
426 | addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align); | ||
427 | ops->cit_iv = (void *)addr; | ||
424 | } | 428 | } |
425 | 429 | ||
426 | out: | 430 | out: |
@@ -429,5 +433,4 @@ out: | |||
429 | 433 | ||
430 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm) | 434 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm) |
431 | { | 435 | { |
432 | kfree(tfm->crt_cipher.cit_iv); | ||
433 | } | 436 | } |