author	Herbert Xu <herbert@gondor.apana.org.au>	2007-09-29 09:24:23 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:55:46 -0400
commit	70613783fc0f6e37b442d79e8417f71a2b71ed93 (patch)
tree	94cf5beb318c16e608fb7e03d4eed7decaaeefc1 /crypto
parent	e4c5c6c9b0d04a7dac19027260f7421305a34856 (diff)
[CRYPTO] blkcipher: Remove alignment restriction on block size
Previously we assumed for convenience that the block size is a multiple of
the algorithm's required alignment.  With the pending addition of CTR this
will no longer be the case as the block size will be 1 due to it being a
stream cipher.  However, the alignment requirement will be that of the
underlying implementation which will most likely be greater than 1.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
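To make the rounding concrete: a stream cipher mode such as CTR reports a
block size of 1, while the underlying block cipher implementation may demand,
say, 16-byte alignment (alignmask 15).  A minimal sketch of the ALIGN()
rounding the patch relies on; the values are hypothetical:

#include <stdio.h>

/* Same rounding as the kernel's ALIGN() for power-of-two alignments. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int bsize = 1;      /* CTR: stream cipher, block size 1 */
	unsigned int alignmask = 15; /* hypothetical: 16-byte alignment */

	/* The patch rounds the block size up wherever buffer offsets are
	 * computed, instead of requiring bsize % (alignmask + 1) == 0. */
	unsigned int aligned_bsize = ALIGN(bsize, alignmask + 1);

	printf("bsize %u -> aligned_bsize %u\n", bsize, aligned_bsize);
	return 0;                    /* prints: bsize 1 -> aligned_bsize 16 */
}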
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/algapi.c	3
-rw-r--r--	crypto/blkcipher.c	13
2 files changed, 8 insertions, 8 deletions
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d891f56f0e8c..58cc19164801 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -63,9 +63,6 @@ static int crypto_check_alg(struct crypto_alg *alg)
 	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
 		return -EINVAL;
 
-	if (alg->cra_alignmask & alg->cra_blocksize)
-		return -EINVAL;
-
 	if (alg->cra_blocksize > PAGE_SIZE / 8)
 		return -EINVAL;
 
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 9c49770837c2..a3c87da23f1e 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -149,6 +149,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
 				      unsigned int alignmask)
 {
 	unsigned int n;
+	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
 
 	if (walk->buffer)
 		goto ok;
@@ -167,8 +168,8 @@ ok:
 	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
 					  alignmask + 1);
 	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
-	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
-						 bsize);
+	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
+						 aligned_bsize, bsize);
 
 	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
 
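For context, blkcipher_get_spot() in this file bumps a pointer forward so
that a len-byte chunk does not straddle a page boundary.  The sketch below
paraphrases that helper (get_spot(), the PAGE_SIZE value, and the buffer are
illustrative assumptions) and shows why src must now sit a whole aligned
slot, not just bsize bytes, past dst:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (uintptr_t)(a) - 1) & ~((uintptr_t)(a) - 1))

/* Paraphrase of blkcipher_get_spot(): if a len-byte chunk starting at
 * start would cross a page boundary, move it up to the start of the
 * page holding its last byte. */
static uint8_t *get_spot(uint8_t *start, unsigned int len)
{
	uintptr_t end_page = ((uintptr_t)(start + len - 1)) &
			     ~((uintptr_t)PAGE_SIZE - 1);
	return end_page > (uintptr_t)start ? (uint8_t *)end_page : start;
}

int main(void)
{
	unsigned int bsize = 1, alignmask = 15; /* hypothetical CTR case */
	unsigned int aligned_bsize = ALIGN(bsize, alignmask + 1);
	static uint8_t buf[2 * PAGE_SIZE];

	uint8_t *dst = (uint8_t *)ALIGN((uintptr_t)buf, alignmask + 1);
	dst = get_spot(dst, bsize);
	/* src starts a whole aligned slot past dst; with the old
	 * "+ bsize" the gap could be as small as 1 byte. */
	uint8_t *src = get_spot(dst + aligned_bsize, bsize);

	printf("dst-to-src gap: %td bytes\n", src - dst); /* >= 16 */
	return 0;
}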
@@ -278,7 +279,9 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
 {
 	unsigned bs = crypto_blkcipher_blocksize(tfm);
 	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
-	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
+	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+			    (alignmask + 1);
 	u8 *iv;
 
 	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
@@ -287,8 +290,8 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
 		return -ENOMEM;
 
 	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
-	iv = blkcipher_get_spot(iv, bs) + bs;
-	iv = blkcipher_get_spot(iv, bs) + bs;
+	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
 	iv = blkcipher_get_spot(iv, ivsize);
 
 	walk->iv = memcpy(iv, walk->iv, ivsize);
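As a sanity check on the new sizing in blkcipher_copy_iv(): with hypothetical
CTR-over-AES numbers (block size 1, 16-byte IV, alignmask 15), the old
formula reserves too little once each get_spot slot is padded out.  A small
computation mirroring the two formulas:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical: CTR over AES, 16-byte IV, 16-byte alignment. */
	unsigned int bs = 1, ivsize = 16, alignmask = 15;
	unsigned int aligned_bs = ALIGN(bs, alignmask + 1);

	unsigned int old_size = bs * 2 + ivsize + max(bs, ivsize) -
				(alignmask + 1);
	unsigned int new_size = aligned_bs * 2 + ivsize +
				max(aligned_bs, ivsize) - (alignmask + 1);

	/* old: 18, new: 48 -- each blkcipher_get_spot() call may push its
	 * slot forward, so the buffer must cover two full aligned blocks
	 * plus the IV. */
	printf("old size %u, new size %u\n", old_size, new_size);
	return 0;
}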