author     Ard Biesheuvel <ard.biesheuvel@linaro.org>    2019-08-09 13:14:57 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>      2019-08-15 07:52:14 -0400
commit     8083b1bf8163e7ae7d8c90f221106d96450b8aa8
tree       c9f45ebd58eb3c5382f975b8422da3e2a0f927f8
parent     a62084d299d950f2ad0649caf9a9b9a431346996
crypto: xts - add support for ciphertext stealing
Add support for the missing ciphertext stealing part of the XTS-AES
specification, which permits inputs of any size >= the block size.
Cc: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Tested-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--  crypto/xts.c  152
1 file changed, 132 insertions(+), 20 deletions(-)
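
Before reading the diff, it may help to see the data movement that ciphertext stealing performs on the final two blocks; the new cts_final() below implements this shuffle with scatterwalk_map_and_copy() on the request's scatterlists. What follows is a minimal, standalone C sketch of that shuffle only: toy_block() is a hypothetical stand-in for the real per-block XTS step (tweak XOR, AES-ECB, tweak XOR), and the single-byte tweak counter is not how XTS derives tweaks (the kernel code multiplies the tweak in GF(2^128) via gf128mul_x_ble()). Only the copy/steal pattern mirrors what the patch does.

#include <stdio.h>
#include <string.h>

#define BS 16

/* hypothetical stand-in for the real XTS per-block step; NOT cryptography */
static void toy_block(unsigned char blk[BS], unsigned char tweak)
{
	int i;

	for (i = 0; i < BS; i++)
		blk[i] ^= (unsigned char)(tweak + i);
}

/*
 * Process len bytes (len >= BS) in place, stealing ciphertext for a
 * trailing partial block the way the XTS specification describes it.
 */
static void toy_xts_cts_encrypt(unsigned char *buf, size_t len)
{
	size_t full = len & ~(size_t)(BS - 1);	/* bytes covered by whole blocks */
	size_t tail = len % BS;			/* leftover bytes (0..15) */
	unsigned char tweak = 0x42;		/* toy tweak; real XTS multiplies in GF(2^128) */
	unsigned char cc[BS], pp[BS];
	size_t off;

	if (len < BS)
		return;				/* CTS still needs at least one full block */

	/* pass 1: every complete block is processed as usual */
	for (off = 0; off < full; off += BS, tweak++)
		toy_block(buf + off, tweak);

	if (!tail)
		return;				/* no partial block, nothing to steal */

	/* CC = last full output block; PP = partial plaintext + stolen CC bytes */
	memcpy(cc, buf + full - BS, BS);
	memcpy(pp, buf + full, tail);
	memcpy(pp + tail, cc + tail, BS - tail);

	toy_block(pp, tweak);			/* processed with the "next" tweak */

	memcpy(buf + full - BS, pp, BS);	/* C_{m-1}: a full block, written back */
	memcpy(buf + full, cc, tail);		/* C_m: the first 'tail' bytes of CC */
}

int main(void)
{
	unsigned char msg[21] = "exactly 21 byte input";	/* 16 + 5 bytes */
	size_t i;

	toy_xts_cts_encrypt(msg, sizeof(msg));
	for (i = 0; i < sizeof(msg); i++)
		printf("%02x", msg[i]);
	printf("\n");
	return 0;
}

For decryption the tweaks of the last two blocks are applied in swapped order, which is what the enc/!enc branches added to xor_tweak() in this patch take care of.
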
diff --git a/crypto/xts.c b/crypto/xts.c
index 11211003db7e..ab117633d64e 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -1,8 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /* XTS: as defined in IEEE1619/D16
  * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
- * (sector sizes which are not a multiple of 16 bytes are,
- * however currently unsupported)
  *
  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  *
@@ -34,6 +32,8 @@ struct xts_instance_ctx {
 
 struct rctx {
 	le128 t;
+	struct scatterlist *tail;
+	struct scatterlist sg[2];
 	struct skcipher_request subreq;
 };
 
@@ -84,10 +84,11 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
  * mutliple calls to the 'ecb(..)' instance, which usually would be slower than
  * just doing the gf128mul_x_ble() calls again.
  */
-static int xor_tweak(struct skcipher_request *req, bool second_pass)
+static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
 	const int bs = XTS_BLOCK_SIZE;
 	struct skcipher_walk w;
 	le128 t = rctx->t;
@@ -109,6 +110,20 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
 		wdst = w.dst.virt.addr;
 
 		do {
+			if (unlikely(cts) &&
+			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
+				if (!enc) {
+					if (second_pass)
+						rctx->t = t;
+					gf128mul_x_ble(&t, &t);
+				}
+				le128_xor(wdst, &t, wsrc);
+				if (enc && second_pass)
+					gf128mul_x_ble(&rctx->t, &t);
+				skcipher_walk_done(&w, avail - bs);
+				return 0;
+			}
+
 			le128_xor(wdst++, &t, wsrc++);
 			gf128mul_x_ble(&t, &t);
 		} while ((avail -= bs) >= bs);
@@ -119,17 +134,71 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
 	return err;
 }
 
-static int xor_tweak_pre(struct skcipher_request *req)
+static int xor_tweak_pre(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, false);
+	return xor_tweak(req, false, enc);
 }
 
-static int xor_tweak_post(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, true);
+	return xor_tweak(req, true, enc);
 }
 
-static void crypt_done(struct crypto_async_request *areq, int err)
+static void cts_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+	le128 b;
+
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+		le128_xor(&b, &rctx->t, &b);
+		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static int cts_final(struct skcipher_request *req,
+		     int (*crypt)(struct skcipher_request *req))
+{
+	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+	int tail = req->cryptlen % XTS_BLOCK_SIZE;
+	le128 b[2];
+	int err;
+
+	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
+				      offset - XTS_BLOCK_SIZE);
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+	memcpy(b + 1, b, tail);
+	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
+
+	le128_xor(b, &rctx->t, b);
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
+
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
+	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
+				   XTS_BLOCK_SIZE, NULL);
+
+	err = crypt(subreq);
+	if (err)
+		return err;
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+	le128_xor(b, &rctx->t, b);
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+
+	return 0;
+}
+
+static void encrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
@@ -137,47 +206,90 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 		struct rctx *rctx = skcipher_request_ctx(req);
 
 		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-		err = xor_tweak_post(req);
+		err = xor_tweak_post(req, true);
+
+		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+			err = cts_final(req, crypto_skcipher_encrypt);
+			if (err == -EINPROGRESS)
+				return;
+		}
 	}
 
 	skcipher_request_complete(req, err);
 }
 
-static void init_crypt(struct skcipher_request *req)
+static void decrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+		err = xor_tweak_post(req, false);
+
+		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+			err = cts_final(req, crypto_skcipher_decrypt);
+			if (err == -EINPROGRESS)
+				return;
+		}
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
 {
 	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 
+	if (req->cryptlen < XTS_BLOCK_SIZE)
+		return -EINVAL;
+
 	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
 	skcipher_request_set_crypt(subreq, req->dst, req->dst,
-				   req->cryptlen, NULL);
+				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
 
 	/* calculate first value of T */
 	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
+
+	return 0;
 }
 
 static int encrypt(struct skcipher_request *req)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
+	int err;
 
-	init_crypt(req);
-	return xor_tweak_pre(req) ?:
+	err = init_crypt(req, encrypt_done) ?:
+	      xor_tweak_pre(req, true) ?:
 	      crypto_skcipher_encrypt(subreq) ?:
-	      xor_tweak_post(req);
+	      xor_tweak_post(req, true);
+
+	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+		return err;
+
+	return cts_final(req, crypto_skcipher_encrypt);
 }
 
 static int decrypt(struct skcipher_request *req)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
+	int err;
+
+	err = init_crypt(req, decrypt_done) ?:
+	      xor_tweak_pre(req, false) ?:
+	      crypto_skcipher_decrypt(subreq) ?:
+	      xor_tweak_post(req, false);
+
+	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+		return err;
 
-	init_crypt(req);
-	return xor_tweak_pre(req) ?:
-	      crypto_skcipher_decrypt(subreq) ?:
-	      xor_tweak_post(req);
+	return cts_final(req, crypto_skcipher_decrypt);
 }
 
 static int init_tfm(struct crypto_skcipher *tfm)
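
With ciphertext stealing in place, a kernel-side user of the generic xts template can now submit a request whose length is not a multiple of the 16-byte block size. The sketch below is illustrative only: the skcipher calls (crypto_alloc_skcipher(), skcipher_request_set_crypt(), crypto_wait_req(), ...) are the standard API, but the function name xts_cts_demo(), the 517-byte buffer and the surrounding boilerplate are assumptions made for the example, not part of this patch.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/err.h>

static int xts_cts_demo(void)
{
	u8 key[64];				/* two AES-256 keys for xts(aes) */
	u8 iv[16] = { 0 };			/* tweak / sector IV */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf;
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	buf = kzalloc(512 + 5, GFP_KERNEL);	/* 517 bytes: not a block multiple */
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	sg_init_one(&sg, buf, 512 + 5);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/* before this patch the template could not handle the 5-byte tail */
	skcipher_request_set_crypt(req, &sg, &sg, 512 + 5, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

Lengths below XTS_BLOCK_SIZE are still rejected: the reworked init_crypt() returns -EINVAL for req->cryptlen < XTS_BLOCK_SIZE, because ciphertext stealing needs at least one full block to steal from.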