author    Herbert Xu <herbert@gondor.apana.org.au>  2007-09-09 03:45:21 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>  2007-09-09 03:45:21 -0400
commit    e4630f9fd8cdc14eb1caa08dafe649eb5ae09985 (patch)
tree      301b4b69b5780ecfa39c9c1f508d57e47da8af29 /crypto
parent    b21010ed6498391c0f359f2a89c907533fe07fec (diff)
[CRYPTO] blkcipher: Fix handling of kmalloc page straddling
The function blkcipher_get_spot tries to return a buffer of the specified
length that does not straddle a page.  It has an off-by-one bug so it may
advance a page unnecessarily.

What's worse, one of its callers doesn't provide a buffer that's sufficiently
long for this operation.

This patch fixes both problems.

Thanks to Bob Gilligan for diagnosing this problem and providing a fix.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
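The off-by-one can be reproduced with a small standalone sketch (user-space C
with simplified stand-ins for PAGE_SIZE/PAGE_MASK/offset_in_page; the names
get_spot_old and get_spot_new are hypothetical and only mirror the logic of
the hunk below, they are not kernel code):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define offset_in_page(p) ((uintptr_t)(p) & (PAGE_SIZE - 1))

/* Old check: a region that ends exactly on a page boundary is treated as
 * straddling, so the spot is advanced to the next page needlessly. */
static unsigned char *get_spot_old(unsigned char *start, unsigned int len)
{
	if (offset_in_page(start + len) < len)
		return (unsigned char *)((uintptr_t)(start + len) & PAGE_MASK);
	return start;
}

/* Fixed check: look at the page containing the last byte, start + len - 1,
 * and return the later of 'start' and that page's start (i.e. max()). */
static unsigned char *get_spot_new(unsigned char *start, unsigned int len)
{
	unsigned char *end_page =
		(unsigned char *)((uintptr_t)(start + len - 1) & PAGE_MASK);
	return end_page > start ? end_page : start;
}

int main(void)
{
	/* Page-aligned buffer so a 16-byte region can end exactly on a
	 * page boundary. */
	unsigned char *buf = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);
	unsigned char *start;

	if (!buf)
		return 1;
	start = buf + PAGE_SIZE - 16;

	/* Old: offset_in_page(start + 16) == 0 < 16, so the spot jumps a
	 * full 16 bytes into the next page although the region fits. */
	printf("old spot advances by %td bytes\n",
	       get_spot_old(start, 16) - start);

	/* New: the last byte's page starts below 'start', so 'start' itself
	 * is returned and nothing is wasted. */
	printf("new spot advances by %td bytes\n",
	       get_spot_new(start, 16) - start);

	free(buf);
	return 0;
}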
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/blkcipher.c | 11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 7755834b8846..97586afbe63b 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -59,11 +59,13 @@ static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
 	scatterwalk_unmap(walk->dst.virt.addr, 1);
 }
 
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
 static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
 {
-	if (offset_in_page(start + len) < len)
-		return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
-	return start;
+	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+	return max(start, end_page);
 }
 
 static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
@@ -155,7 +157,8 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
 	if (walk->buffer)
 		goto ok;
 
-	n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+	n = bsize * 3 - (alignmask + 1) +
+	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
 	walk->buffer = kmalloc(n, GFP_ATOMIC);
 	if (!walk->buffer)
 		return blkcipher_walk_done(desc, walk, -ENOMEM);
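One way to read the new buffer size in blkcipher_next_slow (this reasoning is
reconstructed from the code, not quoted from the commit): the buffer must
cover the alignment padding beyond what kmalloc already guarantees, two
blocks (destination and source), and the worst-case advance blkcipher_get_spot
may apply to keep a block from straddling a page.  Because the aligned start
and the page boundary are both multiples of alignmask + 1, that advance is at
most bsize - (alignmask + 1).  A tiny sketch with hypothetical example values
(bsize = 16, alignmask = 3, ctx alignment = 8):

#include <stdio.h>

int main(void)
{
	/* Hypothetical example values, not taken from any particular cipher:
	 * a 16-byte block, a 4-byte cipher alignment (mask 3), and an 8-byte
	 * crypto_tfm_ctx_alignment(). */
	unsigned int bsize = 16;
	unsigned int alignmask = 3;
	unsigned int ctx_align = 8;

	/* Padding needed to reach (alignmask + 1)-alignment beyond the
	 * alignment kmalloc already provides (ctx_align). */
	unsigned int pad = alignmask & ~(ctx_align - 1);

	/* Worst-case page-boundary advance for one spot: the largest
	 * multiple of (alignmask + 1) strictly below bsize. */
	unsigned int max_advance = bsize - (alignmask + 1);

	/* Two blocks plus the worst-case advance plus the padding equals
	 * the expression used in the patch. */
	unsigned int n = bsize * 3 - (alignmask + 1) + pad;

	printf("pad=%u max_advance=%u\n", pad, max_advance);
	printf("2*bsize + max_advance + pad = %u, n = %u\n",
	       2 * bsize + max_advance + pad, n);
	return 0;
}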