path: root/crypto/ctr.c
author	Herbert Xu <herbert@gondor.apana.org.au>	2007-11-29 08:23:53 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2008-01-10 16:16:24 -0500
commit	0971eb0de9446b66bd45696338f54948314db379 (patch)
tree	379cd3396219f9187a23ea6c57bae62de1d4cef9 /crypto/ctr.c
parent	06e1a8f0505426a97292174a959560fd86ea0a3d (diff)
[CRYPTO] ctr: Fix multi-page processing
When the data spans across a page boundary, CTR may incorrectly process a partial block in the middle, because the blkcipher walking code may supply partial blocks in the middle as long as the total length of the supplied data is more than a block. CTR is supposed to return any unused partial block in that case to the walker.

This patch fixes this by doing exactly that, returning partial blocks to the walker unless we received less than a block's worth of data to start with.

This also allows us to optimise the bulk of the processing since we no longer have to worry about partial blocks until the very end.

Thanks to Tan Swee Heng for fixes and actually testing this :)

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto/ctr.c')
-rw-r--r--	crypto/ctr.c	64
1 file changed, 33 insertions(+), 31 deletions(-)
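Before the diff itself, here is a minimal, self-contained user-space sketch of the structure the patch gives the CTR code: a bulk pass that consumes only full blocks and reports the leftover byte count, plus a separate final pass for the trailing partial block. block_encrypt(), the fixed 16-byte block size and the byte-wise counter increment are illustrative assumptions, not the kernel crypto API.

#include <stddef.h>
#include <stdint.h>

#define BSIZE 16

/* Placeholder "block cipher" so the sketch compiles; not a real cipher. */
static void block_encrypt(uint8_t out[BSIZE], const uint8_t in[BSIZE])
{
        for (int i = 0; i < BSIZE; i++)
                out[i] = in[i] ^ 0xaa;
}

/* Big-endian increment of the counter block, as CTR mode requires. */
static void ctr_inc(uint8_t ctr[BSIZE])
{
        for (int i = BSIZE - 1; i >= 0; i--)
                if (++ctr[i] != 0)
                        break;
}

/*
 * Bulk pass: process full blocks only and return how many bytes are
 * left over, mirroring crypto_ctr_crypt_segment() handing the unused
 * partial block back to the blkcipher walker.
 */
static size_t ctr_crypt_blocks(uint8_t *dst, const uint8_t *src,
                               size_t nbytes, uint8_t ctr[BSIZE])
{
        uint8_t ks[BSIZE];

        while (nbytes >= BSIZE) {
                block_encrypt(ks, ctr);
                for (int i = 0; i < BSIZE; i++)
                        dst[i] = src[i] ^ ks[i];
                ctr_inc(ctr);
                src += BSIZE;
                dst += BSIZE;
                nbytes -= BSIZE;
        }
        return nbytes;  /* unused partial block, handled by the final pass */
}

/*
 * Final pass: generate one keystream block and use only the first
 * nbytes of it, mirroring crypto_ctr_crypt_final().
 */
static void ctr_crypt_final(uint8_t *dst, const uint8_t *src,
                            size_t nbytes, uint8_t ctr[BSIZE])
{
        uint8_t ks[BSIZE];

        block_encrypt(ks, ctr);
        for (size_t i = 0; i < nbytes; i++)
                dst[i] = src[i] ^ ks[i];
}

A caller would run ctr_crypt_blocks() over the full blocks and, if it reports a non-zero remainder, finish with ctr_crypt_final() on the tail; that is exactly the split crypto_ctr_crypt() now performs around blkcipher_walk_done().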
diff --git a/crypto/ctr.c b/crypto/ctr.c
index b816e959fa55..57da7d0affcb 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -59,6 +59,21 @@ static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
 	return err;
 }
 
+static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
+				   struct crypto_cipher *tfm, u8 *ctrblk,
+				   unsigned int countersize)
+{
+	unsigned int bsize = crypto_cipher_blocksize(tfm);
+	u8 *keystream = ctrblk + bsize;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	unsigned int nbytes = walk->nbytes;
+
+	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
+	crypto_xor(keystream, src, nbytes);
+	memcpy(dst, keystream, nbytes);
+}
+
 static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
 				    struct crypto_cipher *tfm, u8 *ctrblk,
 				    unsigned int countersize)
@@ -66,35 +81,23 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
 		   crypto_cipher_alg(tfm)->cia_encrypt;
 	unsigned int bsize = crypto_cipher_blocksize(tfm);
-	unsigned long alignmask = crypto_cipher_alignmask(tfm) |
-				  (__alignof__(u32) - 1);
-	u8 ks[bsize + alignmask];
-	u8 *keystream = (u8 *)ALIGN((unsigned long)ks, alignmask + 1);
 	u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
 	unsigned int nbytes = walk->nbytes;
 
 	do {
 		/* create keystream */
-		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
-		crypto_xor(keystream, src, min(nbytes, bsize));
-
-		/* copy result into dst */
-		memcpy(dst, keystream, min(nbytes, bsize));
+		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
+		crypto_xor(dst, src, bsize);
 
 		/* increment counter in counterblock */
 		crypto_inc(ctrblk + bsize - countersize, countersize);
 
-		if (nbytes < bsize)
-			break;
-
 		src += bsize;
 		dst += bsize;
-		nbytes -= bsize;
-
-	} while (nbytes);
+	} while ((nbytes -= bsize) >= bsize);
 
-	return 0;
+	return nbytes;
 }
 
 static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
@@ -104,30 +107,22 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
 		   crypto_cipher_alg(tfm)->cia_encrypt;
 	unsigned int bsize = crypto_cipher_blocksize(tfm);
-	unsigned long alignmask = crypto_cipher_alignmask(tfm) |
-				  (__alignof__(u32) - 1);
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
-	u8 ks[bsize + alignmask];
-	u8 *keystream = (u8 *)ALIGN((unsigned long)ks, alignmask + 1);
+	u8 *keystream = ctrblk + bsize;
 
 	do {
 		/* create keystream */
 		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
-		crypto_xor(src, keystream, min(nbytes, bsize));
+		crypto_xor(src, keystream, bsize);
 
 		/* increment counter in counterblock */
 		crypto_inc(ctrblk + bsize - countersize, countersize);
 
-		if (nbytes < bsize)
-			break;
-
 		src += bsize;
-		nbytes -= bsize;
-
-	} while (nbytes);
+	} while ((nbytes -= bsize) >= bsize);
 
-	return 0;
+	return nbytes;
 }
 
 static int crypto_ctr_crypt(struct blkcipher_desc *desc,
@@ -143,7 +138,7 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc,
 		crypto_instance_ctx(crypto_tfm_alg_instance(&tfm->base));
 	unsigned long alignmask = crypto_cipher_alignmask(child) |
 				  (__alignof__(u32) - 1);
-	u8 cblk[bsize + alignmask];
+	u8 cblk[bsize * 2 + alignmask];
 	u8 *counterblk = (u8 *)ALIGN((unsigned long)cblk, alignmask + 1);
 	int err;
 
@@ -158,7 +153,7 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc,
 	/* initialize counter portion of counter block */
 	crypto_inc(counterblk + bsize - ictx->countersize, ictx->countersize);
 
-	while (walk.nbytes) {
+	while (walk.nbytes >= bsize) {
 		if (walk.src.virt.addr == walk.dst.virt.addr)
 			nbytes = crypto_ctr_crypt_inplace(&walk, child,
 							  counterblk,
@@ -170,6 +165,13 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc,
 
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
+
+	if (walk.nbytes) {
+		crypto_ctr_crypt_final(&walk, child, counterblk,
+				       ictx->countersize);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+
 	return err;
 }
 
@@ -277,7 +279,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
 	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
 	inst->alg.cra_priority = alg->cra_priority;
 	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = __alignof__(u32) - 1;
+	inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
 	inst->alg.cra_type = &crypto_blkcipher_type;
 
 	inst->alg.cra_blkcipher.ivsize = ivsize;
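The last hunk makes the instance advertise the underlying cipher's alignmask as well: crypto_ctr_crypt_segment() now lets the child cipher write its keystream directly into the walker-supplied dst buffer, so that buffer has to meet the child's alignment requirements, not just the u32 alignment needed by crypto_xor(). A small sketch of the two alignment idioms involved, assuming masks of the form 2^n - 1 as in the kernel (the helper names are illustrative, not kernel API):

#include <stdint.h>

/* Combine two alignment requirements: OR of the masks keeps the stricter one. */
static inline unsigned long combine_alignmask(unsigned long a, unsigned long b)
{
        return a | b;
}

/* Round a pointer up to the next (alignmask + 1) boundary, as ALIGN() does for cblk[]. */
static inline void *align_up(void *p, unsigned long alignmask)
{
        return (void *)(((uintptr_t)p + alignmask) & ~(uintptr_t)alignmask);
}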